/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
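
/*
 * Illustrative sketch only (not part of the original driver): the job
 * descriptor layout above is what the desc_constr.h helpers emit. The
 * function below is hypothetical; the DMA addresses, lengths and flags
 * are placeholders, not values this driver uses verbatim.
 */
#if 0
static void example_build_job_desc(u32 *desc, dma_addr_t sh_desc_dma,
				   int sh_desc_len, dma_addr_t src_dma,
				   u32 in_len, dma_addr_t dst_dma,
				   u32 out_len)
{
	/* Header + pointer to the shared descriptor (executed first) */
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: where the output goes */
	append_seq_out_ptr(desc, dst_dma, out_len, 0);
	/* SEQ_IN_PTR: where the input comes from */
	append_seq_in_ptr(desc, src_dma, in_len, 0);
}
#endif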

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

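/*
 * Added note: the *_LEN values above are descriptor command budgets. They
 * feed a recurring check in the set_sh_desc functions below, which in
 * essence is the sketch given here; the function name is hypothetical.
 */
#if 0
static bool example_keys_fit_inline(unsigned int desc_text_len,
				    unsigned int job_io_len,
				    unsigned int key_bytes)
{
	/* shared + job descriptor must fit the 64-word descriptor buffer */
	return desc_text_len + job_io_len + key_bytes <= CAAM_DESC_BYTES_MAX;
}
#endif
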
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

#ifdef DEBUG
#include <linux/highmem.h>

static void dbg_dump_sg(const char *level, const char *prefix_str,
			int prefix_type, int rowsize, int groupsize,
			struct scatterlist *sg, size_t tlen, bool ascii,
			bool may_sleep)
{
	struct scatterlist *it;
	void *it_page;
	size_t len;
	void *buf;

	for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
		/*
		 * make sure the scatterlist's page
		 * has a valid virtual memory mapping
		 */
		it_page = kmap_atomic(sg_page(it));
		if (unlikely(!it_page)) {
			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
			return;
		}

		buf = it_page + it->offset;
		len = min_t(size_t, tlen, it->length);
		print_hex_dump(level, prefix_str, prefix_type, rowsize,
			       groupsize, buf, len, ascii);
		tlen -= len;

		kunmap_atomic(it_page);
	}
}
#endif
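
/*
 * Hypothetical call site for dbg_dump_sg() (illustrative, DEBUG builds
 * only; "req" stands for an aead_request in scope):
 *
 *	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
 *		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
 *		    req->assoclen + req->cryptlen, false, false);
 */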

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
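
/*
 * Added note: ctx->key holds the concatenated key material consumed by
 * append_key_aead() below:
 *
 *	+---------------------------+----------------+------------------+
 *	| MDHA split key, padded to | encryption key | RFC3686 nonce    |
 *	| split_key_pad_len bytes   |                | (when used; the  |
 *	|                           |                | nonce is counted |
 *	|                           |                | in enckeylen)    |
 *	+---------------------------+----------------+------------------+
 */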

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	if (alg->caam.geniv)
		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
	else
		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	if (alg->caam.geniv) {
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
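
/*
 * Illustrative sketch (hypothetical caller, not part of this driver):
 * the setauthsize/setkey methods above are reached through the generic
 * kernel AEAD API, roughly as follows. The algorithm name and sizes are
 * example values.
 */
#if 0
static int example_setup_aead_tfm(const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setauthsize(tfm, 16);	/* -> aead_setauthsize() */
	if (!ret)
		ret = crypto_aead_setkey(tfm, key, keylen); /* -> aead_setkey() */

	crypto_free_aead(tfm);
	return ret;
}
#endif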

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
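
/*
 * Added note: the MDHA "split key" generated above is the precomputed
 * HMAC ipad/opad state pair derived from the raw authentication key, so
 * the raw key itself is never needed at request time. As aead_setkey()
 * below shows, split_key_len is twice the digest size, padded up to a
 * 16-byte multiple.
 */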

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
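
/*
 * Resulting ctx->key layout after aead_setkey() (sketch):
 *
 *	| MDHA split key, padded to split_key_pad_len | enckey |
 *
 * The shared descriptors pick up the two halves at these fixed offsets.
 */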

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}
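
/*
 * For both rfc4106 and rfc4543 only enckeylen bytes get DMA-mapped; the
 * 4-byte salt stays at ctx->key + ctx->enckeylen and is appended to the
 * job descriptor as immediate data (see init_gcm_job() below).
 */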

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u8 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX |
		    (crt->ivsize << MOVE_LEN_SHIFT) |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to memory */
	append_seq_store(desc, crt->ivsize,
			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	if (ctx1_iv_off)
		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
			    (1 << JUMP_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}
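
/*
 * CONTEXT1 layout programmed above (sketch, byte offsets):
 *
 *	plain CTR: IV at 16 (ctx1_iv_off)
 *	rfc3686:   nonce at 16, IV at 20, BE32 counter of 1 at 28
 *		   (ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE)
 */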

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *key_jump_cmd, *desc;
	__be64 sector_size = cpu_to_be64(512);

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 keys only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	return 0;
}
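
/*
 * XTS IV convention used above (sketch): the upper 8 bytes of the IV are
 * loaded into CONTEXT1 at offset 0x20 as the sector index, the lower 8
 * bytes are skipped, and the 512-byte sector size sits at offset 0x28.
 */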

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
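
/*
 * An aead_edesc is a single kzalloc laid out as (see aead_edesc_alloc()):
 *
 *	| struct aead_edesc | h/w job descriptor | sec4 S/G link table |
 */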

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
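
/*
 * Sequence layout set up by init_aead_job() (sketch):
 *
 *	seq in:  assoc || payload		(assoclen + cryptlen)
 *	seq out: assoc || payload || ICV	(encrypt: + authsize)
 *		 assoc || payload		(decrypt: - authsize)
 *
 * REG3 carries assoclen for the shared descriptor's length math.
 */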

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->enckeylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}
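
/*
 * The GCM IV is always fed as 12 bytes of immediate data: the full
 * generic-GCM IV, or the 4-byte salt kept at the end of ctx->key followed
 * by the 8-byte rfc4106/rfc4543 IV. LAST1 may only be flagged here when
 * nothing else follows, i.e. the empty assoc + payload case.
 */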

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	printk(KERN_ERR "asked=%d, nbytes=%d\n",
	       (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
	dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
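
/*
 * init_ablkcipher_job() feeds the IV in-line with the payload: seq in
 * covers IV || payload (nbytes + ivsize), either contiguously or via the
 * S/G table whose first entry is the IV, while seq out is just the
 * nbytes of payload.
 */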

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}
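
/*
 * For givencrypt the mapping is reversed: seq in is the nbytes of payload
 * only, while seq out covers ivsize + nbytes starting at the IV buffer,
 * so the generated IV (SEQ STORE in the shared descriptor) lands in
 * greq->giv ahead of the ciphertext.
 */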

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	int sgc;
	bool all_contig = true;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
		dst_nents = sg_count(req->dst,
				     req->assoclen + req->cryptlen +
				     (encrypt ? authsize : (-authsize)));
	} else {
		src_nents = sg_count(req->src,
				     req->assoclen + req->cryptlen +
				     (encrypt ? authsize : 0));
	}

	/* Check if data are contiguous. */
	all_contig = !src_nents;
	if (!all_contig) {
		src_nents = src_nents ? : 1;
		sec4_sg_len = src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
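
/*
 * all_contig reflects sg_count() returning 0, i.e. the whole
 * assoc + payload region is a single DMA-able segment; the job descriptor
 * then points straight at it and no source-side S/G table is built.
 */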

static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}
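
/*
 * The assoclen < 8 check matches the IPsec layout assumed by rfc4106:
 * the associated data must at least hold the 4-byte SPI plus the 4-byte
 * sequence number.
 */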

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
Yuan Kang0e479302011-07-15 11:21:41 +08002539
Herbert Xu46218752015-07-09 07:17:33 +08002540static int ipsec_gcm_decrypt(struct aead_request *req)
2541{
2542 if (req->assoclen < 8)
2543 return -EINVAL;
2544
2545 return gcm_decrypt(req);
2546}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    req->assoclen + req->cryptlen, 1, may_sleep);
#endif

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
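
/*
 * iv_contig only holds when the mapped IV ends exactly where the single
 * source segment begins (iv_dma + ivsize == sg_dma_address(req->src));
 * otherwise the IV becomes the first entry of the S/G table.
 */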

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
2727
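/*
 * Illustrative sketch only (not part of the driver): a user reaches
 * ablkcipher_encrypt() through the generic crypto API, roughly as
 * follows. my_done_cb, my_ctx and the key/src/dst buffers are
 * caller-supplied placeholders; error handling is elided.
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *r;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	r = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(r, 0, my_done_cb, my_ctx);
 *	ablkcipher_request_set_crypt(r, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(r);	(returns -EINPROGRESS here)
 */
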
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
		iv_contig = true;
	else
		dst_nents = dst_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}

	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

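/*
 * Layout differs from ablkcipher_edesc_alloc() above: because the
 * device generates the IV here, contiguity is checked against the
 * destination rather than the source, and the S/G table is ordered
 * src entries, then IV, then dst entries, so the fresh IV lands
 * directly in front of the ciphertext.
 */
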
static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
					   CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

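/*
 * The AEAD algorithms below use the aead_alg interface directly,
 * wrapped in struct caam_aead_alg so the CAAM descriptor template can
 * ride along; they are registered with crypto_register_aead() rather
 * than through the caam_alg_template/caam_alg_alloc() path above.
 */
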
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};

struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;

	return 0;
}

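/*
 * Each transform instance grabs its own job ring via caam_jr_alloc(),
 * so requests from different tfms can be enqueued to the hardware
 * independently; the OP_TYPE_* | alg_type header templates cached in
 * the context are later folded into the shared descriptors built at
 * setkey time.
 */
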
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;
	t_alg->caam.alg_op = template->alg_op;

	return t_alg;
}

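/* Fill in the boilerplate that is common to every CAAM AEAD algorithm */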
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

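/*
 * Module init: find the CAAM controller, read its capability registers,
 * and register only the algorithms the hardware can actually back.
 */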
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

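	/*
	 * Locate the SEC node in the device tree; both the current
	 * "fsl,sec-v4.0" and the legacy "fsl,sec4.0" compatible strings
	 * are accepted.
	 */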
	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

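	/* alg_list tracks the legacy algorithms we manage to register */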
	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

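	/* First pass: the legacy ablkcipher/givcipher templates */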
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

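	/* Second pass: the new-style AEAD algorithms */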
	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
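
/*
 * Usage sketch (illustrative only, not part of this driver): once the
 * module is loaded, the algorithms registered above are reached through
 * the generic crypto API, e.g.:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 * The algorithm name above is an example template; the set actually
 * available depends on the CHA instantiation detected at init time.
 */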

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");