/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
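
/*
 * Illustrative sketch (not part of the driver): with the layout above, a
 * job descriptor is built by pointing it at an already-constructed shared
 * descriptor and then describing the packet, using the same helpers this
 * file uses below:
 *
 *	len = desc_len(sh_desc);
 *	init_job_desc_shared(desc, sh_desc_dma, len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *
 * See init_aead_job() and init_ablkcipher_job() for the real thing.
 */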

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

#ifdef DEBUG
#include <linux/highmem.h>

static void dbg_dump_sg(const char *level, const char *prefix_str,
			int prefix_type, int rowsize, int groupsize,
			struct scatterlist *sg, size_t tlen, bool ascii)
{
	struct scatterlist *it;
	void *it_page;
	size_t len;
	void *buf;

	for (it = sg; it != NULL && tlen > 0 ; it = sg_next(it)) {
		/*
		 * make sure the scatterlist's page
		 * has a valid virtual memory mapping
		 */
		it_page = kmap_atomic(sg_page(it));
		if (unlikely(!it_page)) {
			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
			return;
		}

		buf = it_page + it->offset;
		len = min_t(size_t, tlen, it->length);
		print_hex_dump(level, prefix_str, prefix_type, rowsize,
			       groupsize, buf, len, ascii);
		tlen -= len;

		kunmap_atomic(it_page);
	}
}
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};
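
/*
 * Sketch of the ctx->key layout used by the authenc (AEAD) algorithms
 * below (see aead_setkey()): the generated split authentication key is
 * stored first, padded to adata.keylen_pad, and the encryption key is
 * copied right after it:
 *
 *	ctx->key: [ auth split key (padded) | encryption key ]
 *	           <--- adata.keylen_pad ---><- cdata.keylen ->
 *
 * The gcm/rfc4106/rfc4543 and ablkcipher setkey routines instead store
 * just the cipher key material (including any nonce/salt) here.
 */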

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;
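
	/*
	 * Back-of-the-envelope numbers, assuming CAAM_CMD_SZ is 4 bytes:
	 * CAAM_DESC_BYTES_MAX is the 64-word h/w buffer, i.e. 256 bytes.
	 * rem_bytes is what remains for the shared descriptor once the job
	 * descriptor I/O commands and the padded split key are accounted
	 * for, and decides below whether the key can be inlined or must be
	 * referenced through key_dma.
	 */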

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);
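
	/*
	 * desc_inline_query() reports, per entry of data_len[], whether the
	 * key fits inline: bit 0 of inl_mask covers data_len[0] (the split
	 * authentication key), bit 1 covers data_len[1] (the encryption
	 * key). The same pattern repeats for the decrypt and givencrypt
	 * descriptors below.
	 */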

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
			       is_rfc3686, nonce, ctx1_iv_off);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret) {
		goto badkey;
	}

	/* append the encryption key after the (padded) auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

	memcpy(ctx->key, key, keylen);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}
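
	/*
	 * For example, rfc3686(ctr(aes)) with AES-128 receives 20 key
	 * bytes: the 16-byte AES key followed by the 4-byte nonce
	 * (CTR_RFC3686_NONCE_SIZE). The whole blob was copied to ctx->key
	 * above; keylen is trimmed here so cdata.keylen reflects only the
	 * AES key, leaving the nonce at the tail of the key buffer.
	 */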

	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;
	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
					ctx1_iv_off);
	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
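
/*
 * A sketch (not a new structure) of the single allocation made in
 * aead_edesc_alloc() below:
 *
 *	kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, GFP_DMA | flags)
 *
 *	[ struct aead_edesc | hw_desc[] (job descriptor) | sec4_sg[] ]
 *
 * so the job descriptor and its h/w link table live in one buffer, with
 * only the link table portion DMA-mapped separately (sec4_sg_dma).
 */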

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
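
/*
 * The job descriptor built above matches the diagram at the top of this
 * file: a header chaining to the shared descriptor, then SEQ IN/OUT
 * pointers that either reference the packet data directly or point into
 * the sec4_sg h/w link table when the data is not contiguous.
 */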

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	printk(KERN_ERR "asked=%d, nbytes=%d\n",
	       (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
1231 dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
1232 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
Horia Geantă00fef2b2016-11-09 10:46:16 +02001233 edesc->src_nents ? 100 : req->nbytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08001234#endif
1235
1236 len = desc_len(sh_desc);
1237 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1238
1239 if (iv_contig) {
1240 src_dma = edesc->iv_dma;
1241 in_options = 0;
1242 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001243 src_dma = edesc->sec4_sg_dma;
Cristian Stoica35b82e52015-01-21 11:53:30 +02001244 sec4_sg_index += edesc->src_nents + 1;
Yuan Kangacdca312011-07-15 11:21:42 +08001245 in_options = LDST_SGF;
1246 }
1247 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1248
1249 if (likely(req->src == req->dst)) {
1250 if (!edesc->src_nents && iv_contig) {
1251 dst_dma = sg_dma_address(req->src);
1252 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001253 dst_dma = edesc->sec4_sg_dma +
1254 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08001255 out_options = LDST_SGF;
1256 }
1257 } else {
1258 if (!edesc->dst_nents) {
1259 dst_dma = sg_dma_address(req->dst);
1260 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001261 dst_dma = edesc->sec4_sg_dma +
1262 sec4_sg_index * sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08001263 out_options = LDST_SGF;
1264 }
1265 }
1266 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1267}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}
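
/*
 * Note the mirror image of the non-giv variant above: here SEQ_IN_PTR
 * covers only the source data (req->nbytes), while SEQ_OUT_PTR covers
 * IV || ciphertext (req->nbytes + ivsize), so the IV generated by the
 * shared descriptor is written back to the caller along with the result.
 */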

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	int sgc;
	bool all_contig = true;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
		dst_nents = sg_count(req->dst,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : (-authsize)));
	} else {
		src_nents = sg_count(req->src,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : 0));
	}

	/* Check if data are contiguous. */
	all_contig = !src_nents;
	if (!all_contig)
		sec4_sg_len = src_nents;

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
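
/*
 * Memory layout of the extended descriptor returned above, carved out of
 * a single GFP_DMA allocation:
 *
 *	-------------------------
 *	| struct aead_edesc     |
 *	| hw job descriptor     |  (desc_bytes)
 *	| sec4 S/G link table   |  (sec4_sg_bytes, may be empty)
 *	-------------------------
 *
 * edesc->sec4_sg points into this buffer; only the S/G table gets its
 * own DMA mapping here (edesc->sec4_sg_dma), the job descriptor is
 * mapped later when it is handed to the ring via caam_jr_enqueue().
 */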

static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
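
/*
 * As with all the request entry points below: a successful enqueue
 * returns -EINPROGRESS and ownership of edesc passes to the completion
 * callback (here aead_encrypt_done); the edesc is unmapped and freed
 * synchronously only when caam_jr_enqueue() itself fails.
 */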

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}
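
/*
 * For the RFC4106/RFC4543 IPsec transforms the AEAD API convention
 * places the 8-byte IV at the tail of the associated data region, so
 * req->assoclen can never legitimately be smaller than 8; anything
 * shorter is rejected before a descriptor is built.
 */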

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

#ifdef DEBUG
	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    req->assoclen + req->cryptlen, 1);
#endif

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
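
/*
 * On the decrypt path the descriptor also verifies the ICV in hardware;
 * an authentication failure therefore surfaces as a job completion
 * status translated into -EBADMSG by the done callback (earlier in this
 * file), not as an error from the enqueue above.
 */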

/*
 * allocate and map the ablkcipher extended descriptor
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
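
/*
 * Layout of the sec4 S/G table built above when the IV is not contiguous
 * with the data, in the style of the descriptor diagram at the top of
 * this file:
 *
 *	index 0			: IV
 *	index 1 .. src_nents	: source segments (last entry flagged)
 *	index sec4_sg_index ..	: destination segments, only when
 *				  req->dst != req->src
 *
 * SEQ_IN_PTR then points at entry 0, and SEQ_OUT_PTR at entry 1
 * (in-place) or at the destination run (out-of-place); cf.
 * init_ablkcipher_job().
 */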

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
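
/*
 * Encrypt and decrypt differ only in which prebuilt shared descriptor
 * the job points at (ctx->sh_desc_enc vs. ctx->sh_desc_dec); the job
 * descriptor construction and the enqueue/completion handling are
 * otherwise identical.
 */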

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
		iv_contig = true;
	else
		dst_nents = dst_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}

	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
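
/*
 * Unlike ablkcipher_edesc_alloc() above, the givencrypt variant tests
 * whether the IV is contiguous with the *destination*, since here the
 * generated IV is an output.  The resulting sec4 S/G table is:
 *
 *	index 0 .. src_nents-1	: source segments
 *	index sec4_sg_index	: IV (only when not contiguous)
 *	following entries	: destination segments
 */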

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
					   CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};
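
/*
 * Each template below is stamped out into a full struct crypto_alg at
 * registration time; .type selects between a plain ABLKCIPHER and a
 * GIVCIPHER (an algorithm that generates its own IV), and the
 * class1_alg_type value is what ends up in the shared descriptor's
 * OPERATION command.
 */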

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
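
/*
 * AEAD algorithms use the crypto_register_aead() path and therefore
 * carry a full struct aead_alg, wrapped in caam_aead_alg together with
 * the CAAM-specific class1/class2 algorithm selectors, instead of the
 * template above.  Entries with .geniv = true are the echainiv
 * IV-generating variants of the corresponding authenc algorithms.
 */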

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
3004 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3005 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3006 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003007 .geniv = true,
3008 },
3009 },
3010 {
3011 .aead = {
3012 .base = {
3013 .cra_name = "authenc(hmac(sha512),cbc(des))",
3014 .cra_driver_name = "authenc-hmac-sha512-"
3015 "cbc-des-caam",
3016 .cra_blocksize = DES_BLOCK_SIZE,
3017 },
3018 .setkey = aead_setkey,
3019 .setauthsize = aead_setauthsize,
3020 .encrypt = aead_encrypt,
3021 .decrypt = aead_decrypt,
3022 .ivsize = DES_BLOCK_SIZE,
3023 .maxauthsize = SHA512_DIGEST_SIZE,
3024 },
3025 .caam = {
3026 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3027 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3028 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003029 },
3030 },
3031 {
3032 .aead = {
3033 .base = {
3034 .cra_name = "echainiv(authenc(hmac(sha512),"
3035 "cbc(des)))",
3036 .cra_driver_name = "echainiv-authenc-"
3037 "hmac-sha512-cbc-des-caam",
3038 .cra_blocksize = DES_BLOCK_SIZE,
3039 },
3040 .setkey = aead_setkey,
3041 .setauthsize = aead_setauthsize,
3042 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003043 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003044 .ivsize = DES_BLOCK_SIZE,
3045 .maxauthsize = SHA512_DIGEST_SIZE,
3046 },
3047 .caam = {
3048 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3049 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3050 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003051 .geniv = true,
3052 },
3053 },
3054 {
3055 .aead = {
3056 .base = {
3057 .cra_name = "authenc(hmac(md5),"
3058 "rfc3686(ctr(aes)))",
3059 .cra_driver_name = "authenc-hmac-md5-"
3060 "rfc3686-ctr-aes-caam",
3061 .cra_blocksize = 1,
3062 },
3063 .setkey = aead_setkey,
3064 .setauthsize = aead_setauthsize,
3065 .encrypt = aead_encrypt,
3066 .decrypt = aead_decrypt,
3067 .ivsize = CTR_RFC3686_IV_SIZE,
3068 .maxauthsize = MD5_DIGEST_SIZE,
3069 },
3070 .caam = {
3071 .class1_alg_type = OP_ALG_ALGSEL_AES |
3072 OP_ALG_AAI_CTR_MOD128,
3073 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3074 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003075 .rfc3686 = true,
3076 },
3077 },
3078 {
3079 .aead = {
3080 .base = {
3081 .cra_name = "seqiv(authenc("
3082 "hmac(md5),rfc3686(ctr(aes))))",
3083 .cra_driver_name = "seqiv-authenc-hmac-md5-"
3084 "rfc3686-ctr-aes-caam",
3085 .cra_blocksize = 1,
3086 },
3087 .setkey = aead_setkey,
3088 .setauthsize = aead_setauthsize,
3089 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003090 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003091 .ivsize = CTR_RFC3686_IV_SIZE,
3092 .maxauthsize = MD5_DIGEST_SIZE,
3093 },
3094 .caam = {
3095 .class1_alg_type = OP_ALG_ALGSEL_AES |
3096 OP_ALG_AAI_CTR_MOD128,
3097 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3098 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003099 .rfc3686 = true,
3100 .geniv = true,
3101 },
3102 },
3103 {
3104 .aead = {
3105 .base = {
3106 .cra_name = "authenc(hmac(sha1),"
3107 "rfc3686(ctr(aes)))",
3108 .cra_driver_name = "authenc-hmac-sha1-"
3109 "rfc3686-ctr-aes-caam",
3110 .cra_blocksize = 1,
3111 },
3112 .setkey = aead_setkey,
3113 .setauthsize = aead_setauthsize,
3114 .encrypt = aead_encrypt,
3115 .decrypt = aead_decrypt,
3116 .ivsize = CTR_RFC3686_IV_SIZE,
3117 .maxauthsize = SHA1_DIGEST_SIZE,
3118 },
3119 .caam = {
3120 .class1_alg_type = OP_ALG_ALGSEL_AES |
3121 OP_ALG_AAI_CTR_MOD128,
3122 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3123 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003124 .rfc3686 = true,
3125 },
3126 },
3127 {
3128 .aead = {
3129 .base = {
3130 .cra_name = "seqiv(authenc("
3131 "hmac(sha1),rfc3686(ctr(aes))))",
3132 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
3133 "rfc3686-ctr-aes-caam",
3134 .cra_blocksize = 1,
3135 },
3136 .setkey = aead_setkey,
3137 .setauthsize = aead_setauthsize,
3138 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003139 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003140 .ivsize = CTR_RFC3686_IV_SIZE,
3141 .maxauthsize = SHA1_DIGEST_SIZE,
3142 },
3143 .caam = {
3144 .class1_alg_type = OP_ALG_ALGSEL_AES |
3145 OP_ALG_AAI_CTR_MOD128,
3146 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3147 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003148 .rfc3686 = true,
3149 .geniv = true,
3150 },
3151 },
3152 {
3153 .aead = {
3154 .base = {
3155 .cra_name = "authenc(hmac(sha224),"
3156 "rfc3686(ctr(aes)))",
3157 .cra_driver_name = "authenc-hmac-sha224-"
3158 "rfc3686-ctr-aes-caam",
3159 .cra_blocksize = 1,
3160 },
3161 .setkey = aead_setkey,
3162 .setauthsize = aead_setauthsize,
3163 .encrypt = aead_encrypt,
3164 .decrypt = aead_decrypt,
3165 .ivsize = CTR_RFC3686_IV_SIZE,
3166 .maxauthsize = SHA224_DIGEST_SIZE,
3167 },
3168 .caam = {
3169 .class1_alg_type = OP_ALG_ALGSEL_AES |
3170 OP_ALG_AAI_CTR_MOD128,
3171 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3172 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003173 .rfc3686 = true,
3174 },
3175 },
3176 {
3177 .aead = {
3178 .base = {
3179 .cra_name = "seqiv(authenc("
3180 "hmac(sha224),rfc3686(ctr(aes))))",
3181 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
3182 "rfc3686-ctr-aes-caam",
3183 .cra_blocksize = 1,
3184 },
3185 .setkey = aead_setkey,
3186 .setauthsize = aead_setauthsize,
3187 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003188 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003189 .ivsize = CTR_RFC3686_IV_SIZE,
3190 .maxauthsize = SHA224_DIGEST_SIZE,
3191 },
3192 .caam = {
3193 .class1_alg_type = OP_ALG_ALGSEL_AES |
3194 OP_ALG_AAI_CTR_MOD128,
3195 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3196 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003197 .rfc3686 = true,
3198 .geniv = true,
3199 },
3200 },
3201 {
3202 .aead = {
3203 .base = {
3204 .cra_name = "authenc(hmac(sha256),"
3205 "rfc3686(ctr(aes)))",
3206 .cra_driver_name = "authenc-hmac-sha256-"
3207 "rfc3686-ctr-aes-caam",
3208 .cra_blocksize = 1,
3209 },
3210 .setkey = aead_setkey,
3211 .setauthsize = aead_setauthsize,
3212 .encrypt = aead_encrypt,
3213 .decrypt = aead_decrypt,
3214 .ivsize = CTR_RFC3686_IV_SIZE,
3215 .maxauthsize = SHA256_DIGEST_SIZE,
3216 },
3217 .caam = {
3218 .class1_alg_type = OP_ALG_ALGSEL_AES |
3219 OP_ALG_AAI_CTR_MOD128,
3220 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3221 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003222 .rfc3686 = true,
3223 },
3224 },
3225 {
3226 .aead = {
3227 .base = {
3228 .cra_name = "seqiv(authenc(hmac(sha256),"
3229 "rfc3686(ctr(aes))))",
3230 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
3231 "rfc3686-ctr-aes-caam",
3232 .cra_blocksize = 1,
3233 },
3234 .setkey = aead_setkey,
3235 .setauthsize = aead_setauthsize,
3236 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003237 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003238 .ivsize = CTR_RFC3686_IV_SIZE,
3239 .maxauthsize = SHA256_DIGEST_SIZE,
3240 },
3241 .caam = {
3242 .class1_alg_type = OP_ALG_ALGSEL_AES |
3243 OP_ALG_AAI_CTR_MOD128,
3244 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3245 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003246 .rfc3686 = true,
3247 .geniv = true,
3248 },
3249 },
3250 {
3251 .aead = {
3252 .base = {
3253 .cra_name = "authenc(hmac(sha384),"
3254 "rfc3686(ctr(aes)))",
3255 .cra_driver_name = "authenc-hmac-sha384-"
3256 "rfc3686-ctr-aes-caam",
3257 .cra_blocksize = 1,
3258 },
3259 .setkey = aead_setkey,
3260 .setauthsize = aead_setauthsize,
3261 .encrypt = aead_encrypt,
3262 .decrypt = aead_decrypt,
3263 .ivsize = CTR_RFC3686_IV_SIZE,
3264 .maxauthsize = SHA384_DIGEST_SIZE,
3265 },
3266 .caam = {
3267 .class1_alg_type = OP_ALG_ALGSEL_AES |
3268 OP_ALG_AAI_CTR_MOD128,
3269 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3270 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003271 .rfc3686 = true,
3272 },
3273 },
3274 {
3275 .aead = {
3276 .base = {
3277 .cra_name = "seqiv(authenc(hmac(sha384),"
3278 "rfc3686(ctr(aes))))",
3279 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
3280 "rfc3686-ctr-aes-caam",
3281 .cra_blocksize = 1,
3282 },
3283 .setkey = aead_setkey,
3284 .setauthsize = aead_setauthsize,
3285 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003286 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003287 .ivsize = CTR_RFC3686_IV_SIZE,
3288 .maxauthsize = SHA384_DIGEST_SIZE,
3289 },
3290 .caam = {
3291 .class1_alg_type = OP_ALG_ALGSEL_AES |
3292 OP_ALG_AAI_CTR_MOD128,
3293 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3294 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003295 .rfc3686 = true,
3296 .geniv = true,
3297 },
3298 },
3299 {
3300 .aead = {
3301 .base = {
3302 .cra_name = "authenc(hmac(sha512),"
3303 "rfc3686(ctr(aes)))",
3304 .cra_driver_name = "authenc-hmac-sha512-"
3305 "rfc3686-ctr-aes-caam",
3306 .cra_blocksize = 1,
3307 },
3308 .setkey = aead_setkey,
3309 .setauthsize = aead_setauthsize,
3310 .encrypt = aead_encrypt,
3311 .decrypt = aead_decrypt,
3312 .ivsize = CTR_RFC3686_IV_SIZE,
3313 .maxauthsize = SHA512_DIGEST_SIZE,
3314 },
3315 .caam = {
3316 .class1_alg_type = OP_ALG_ALGSEL_AES |
3317 OP_ALG_AAI_CTR_MOD128,
3318 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3319 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003320 .rfc3686 = true,
3321 },
3322 },
3323 {
3324 .aead = {
3325 .base = {
3326 .cra_name = "seqiv(authenc(hmac(sha512),"
3327 "rfc3686(ctr(aes))))",
3328 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
3329 "rfc3686-ctr-aes-caam",
3330 .cra_blocksize = 1,
3331 },
3332 .setkey = aead_setkey,
3333 .setauthsize = aead_setauthsize,
3334 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003335 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003336 .ivsize = CTR_RFC3686_IV_SIZE,
3337 .maxauthsize = SHA512_DIGEST_SIZE,
3338 },
3339 .caam = {
3340 .class1_alg_type = OP_ALG_ALGSEL_AES |
3341 OP_ALG_AAI_CTR_MOD128,
3342 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3343 OP_ALG_AAI_HMAC_PRECOMP,
Herbert Xu479bcc72015-07-30 17:53:17 +08003344 .rfc3686 = true,
3345 .geniv = true,
3346 },
3347 },
Herbert Xuf2147b82015-06-16 13:54:23 +08003348};
3349
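/*
 * The templates above are only ever reached by cra_name through the
 * generic crypto API; nothing calls into this file directly.  A minimal
 * sketch of requesting one of the AEADs registered from this table
 * ("key" and "keylen" stand for a caller-supplied key, and error
 * handling past allocation is elided):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(des))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);	(lands in aead_setkey())
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 */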
struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

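/*
 * Per-tfm setup shared by all algorithm types: reserve a job ring for
 * this transform and cache the class 1/class 2 OPERATION header values
 * used later when the shared descriptors are constructed.
 */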
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	return 0;
}

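/*
 * The ->cra_init/->init hooks recover the driver-private wrapper from
 * the generic algorithm object and hand its CAAM parameters on to
 * caam_init_common().
 */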
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

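/*
 * Undo caam_init_common() and any setkey-time state: unmap whichever
 * shared descriptors and split key were DMA-mapped, then release the
 * job ring.
 */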
static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->cdata.keylen + ctx->adata.keylen_pad,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

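/*
 * Build a caam_crypto_alg from a driver template.  The caller owns the
 * result and must kfree() it if registration fails.
 */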
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}

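/* Fill in the base fields and hooks common to every driver_aeads entry. */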
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

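/*
 * Module init: locate the CAAM controller, detect which CHA blocks
 * (DES, AES, MD) this instantiation provides, and register only the
 * algorithms the hardware can actually back.
 */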
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES modes not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
			     OP_ALG_AAI_XTS)
				continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");