Huang Ying54b6a1b2009-01-18 16:28:34 +11001/*
2 * Support for Intel AES-NI instructions. This file contains glue
 3 * code; the real AES implementation is in aesni-intel_asm.S.
4 *
5 * Copyright (C) 2008, Intel Corp.
6 * Author: Huang Ying <ying.huang@intel.com>
7 *
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04008 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9 * interface for 64-bit kernels.
10 * Authors: Adrian Hoban <adrian.hoban@intel.com>
11 * Gabriele Paoloni <gabriele.paoloni@intel.com>
12 * Tadeusz Struk (tadeusz.struk@intel.com)
13 * Aidan O'Mahony (aidan.o.mahony@intel.com)
14 * Copyright (c) 2010, Intel Corporation.
15 *
Huang Ying54b6a1b2009-01-18 16:28:34 +110016 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 */
21
22#include <linux/hardirq.h>
23#include <linux/types.h>
24#include <linux/crypto.h>
Paul Gortmaker7c52d552011-05-27 12:33:10 -040025#include <linux/module.h>
Huang Ying54b6a1b2009-01-18 16:28:34 +110026#include <linux/err.h>
27#include <crypto/algapi.h>
28#include <crypto/aes.h>
29#include <crypto/cryptd.h>
Huang Ying12387a42010-03-10 18:28:55 +080030#include <crypto/ctr.h>
Jussi Kivilinna023af602012-07-22 18:18:37 +030031#include <crypto/b128ops.h>
32#include <crypto/lrw.h>
33#include <crypto/xts.h>
Andi Kleen3bd391f2012-01-26 00:09:06 +010034#include <asm/cpu_device_id.h>
Huang Ying54b6a1b2009-01-18 16:28:34 +110035#include <asm/i387.h>
Jussi Kivilinna70ef2602012-06-18 14:07:50 +030036#include <asm/crypto/aes.h>
Ard Biesheuvel801201a2013-09-20 09:55:41 +020037#include <crypto/ablk_helper.h>
Tadeusz Struk0bd82f52010-11-04 15:00:45 -040038#include <crypto/scatterwalk.h>
39#include <crypto/internal/aead.h>
40#include <linux/workqueue.h>
41#include <linux/spinlock.h>
Jussi Kivilinnac456a9c2013-04-08 21:51:16 +030042#ifdef CONFIG_X86_64
43#include <asm/crypto/glue_helper.h>
44#endif
Huang Ying54b6a1b2009-01-18 16:28:34 +110045
Timothy McCaffreye31ac322015-01-13 13:16:43 -050046
Tadeusz Struk0bd82f52010-11-04 15:00:45 -040047/* This data is stored at the end of the crypto_tfm struct.
 48 * It is a per-session data storage location.
 49 * It needs to be 16-byte aligned.
50 */
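/*
 * Note: for rfc4106(gcm(aes)) the last four bytes of the key passed to
 * setkey() are not part of the AES key; they are the salt stored in
 * ->nonce below and prepended to the per-request IV (see
 * common_rfc4106_set_key() and __driver_rfc4106_encrypt()).
 */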
51struct aesni_rfc4106_gcm_ctx {
52 u8 hash_subkey[16];
53 struct crypto_aes_ctx aes_key_expanded;
54 u8 nonce[4];
55 struct cryptd_aead *cryptd_tfm;
56};
57
58struct aesni_gcm_set_hash_subkey_result {
59 int err;
60 struct completion completion;
61};
62
63struct aesni_hash_subkey_req_data {
64 u8 iv[16];
65 struct aesni_gcm_set_hash_subkey_result result;
66 struct scatterlist sg;
67};
68
69#define AESNI_ALIGN (16)
Huang Ying54b6a1b2009-01-18 16:28:34 +110070#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
Tadeusz Struk0bd82f52010-11-04 15:00:45 -040071#define RFC4106_HASH_SUBKEY_SIZE 16
Huang Ying54b6a1b2009-01-18 16:28:34 +110072
Jussi Kivilinna023af602012-07-22 18:18:37 +030073struct aesni_lrw_ctx {
74 struct lrw_table_ctx lrw_table;
75 u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
76};
77
78struct aesni_xts_ctx {
79 u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
80 u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
81};
82
Huang Ying54b6a1b2009-01-18 16:28:34 +110083asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84 unsigned int key_len);
85asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86 const u8 *in);
87asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88 const u8 *in);
89asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
90 const u8 *in, unsigned int len);
91asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
92 const u8 *in, unsigned int len);
93asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94 const u8 *in, unsigned int len, u8 *iv);
95asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96 const u8 *in, unsigned int len, u8 *iv);
Randy Dunlap9bed4ac2011-05-18 09:03:34 +100097
98int crypto_fpu_init(void);
99void crypto_fpu_exit(void);
100
Tim Chend7645932013-12-11 14:28:41 -0800101#define AVX_GEN2_OPTSIZE 640
102#define AVX_GEN4_OPTSIZE 4096
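/*
 * Size thresholds used by the GCM dispatch helpers below: requests
 * shorter than AVX_GEN2_OPTSIZE (or with a non-128-bit key) fall back to
 * the SSE aesni_gcm_enc/dec routines, requests up to AVX_GEN4_OPTSIZE use
 * the AVX (gen2) routines, and larger requests use the AVX2 (gen4)
 * routines, provided the CPU support was detected in aesni_init().
 */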
103
Mathias Krause0d258ef2010-11-27 16:34:46 +0800104#ifdef CONFIG_X86_64
chandramouli narayanan22cddcc2014-06-10 09:22:47 -0700105
106static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
107 const u8 *in, unsigned int len, u8 *iv);
Huang Ying12387a42010-03-10 18:28:55 +0800108asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
109 const u8 *in, unsigned int len, u8 *iv);
Huang Ying54b6a1b2009-01-18 16:28:34 +1100110
Jussi Kivilinnac456a9c2013-04-08 21:51:16 +0300111asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
112 const u8 *in, bool enc, u8 *iv);
113
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400114/* asmlinkage void aesni_gcm_enc()
115 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
116 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117 * const u8 *in, Plaintext input
118 * unsigned long plaintext_len, Length of data in bytes for encryption.
119 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
120 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
121 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
122 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
123 * const u8 *aad, Additional Authentication Data (AAD)
124 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
125 * is going to be 8 or 12 bytes
126 * u8 *auth_tag, Authenticated Tag output.
127 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
128 * Valid values are 16 (most likely), 12 or 8.
129 */
130asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
131 const u8 *in, unsigned long plaintext_len, u8 *iv,
132 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
133 u8 *auth_tag, unsigned long auth_tag_len);
134
135/* asmlinkage void aesni_gcm_dec()
136 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
137 * u8 *out, Plaintext output. Decrypt in-place is allowed.
138 * const u8 *in, Ciphertext input
139 * unsigned long ciphertext_len, Length of data in bytes for decryption.
140 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
141 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
142 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
143 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
144 * const u8 *aad, Additional Authentication Data (AAD)
145 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
146 * to be 8 or 12 bytes
147 * u8 *auth_tag, Authenticated Tag output.
148 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
149 * Valid values are 16 (most likely), 12 or 8.
150 */
151asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
152 const u8 *in, unsigned long ciphertext_len, u8 *iv,
153 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154 u8 *auth_tag, unsigned long auth_tag_len);
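
/*
 * Illustrative sketch only (not part of the upstream driver): how the
 * pre-counter block j0 described in the two comments above is assembled
 * from the 4-byte salt and the 8-byte explicit IV.  The helper name is
 * hypothetical; the driver open-codes this in __driver_rfc4106_encrypt()
 * and __driver_rfc4106_decrypt().
 */
static inline void rfc4106_build_j0_sketch(u8 *j0, const u8 *salt,
					   const u8 *esp_iv)
{
	memcpy(j0, salt, 4);			/* 4-byte salt from the SA     */
	memcpy(j0 + 4, esp_iv, 8);		/* 8-byte explicit IV from ESP */
	*(__be32 *)(j0 + 12) = cpu_to_be32(1);	/* initial counter value       */
}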
155
Tim Chend7645932013-12-11 14:28:41 -0800156
157#ifdef CONFIG_AS_AVX
chandramouli narayanan22cddcc2014-06-10 09:22:47 -0700158asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
159 void *keys, u8 *out, unsigned int num_bytes);
160asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
161 void *keys, u8 *out, unsigned int num_bytes);
162asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
163 void *keys, u8 *out, unsigned int num_bytes);
Tim Chend7645932013-12-11 14:28:41 -0800164/*
165 * asmlinkage void aesni_gcm_precomp_avx_gen2()
166 * gcm_data *my_ctx_data, context data
167 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
168 */
169asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
170
171asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
172 const u8 *in, unsigned long plaintext_len, u8 *iv,
173 const u8 *aad, unsigned long aad_len,
174 u8 *auth_tag, unsigned long auth_tag_len);
175
176asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
177 const u8 *in, unsigned long ciphertext_len, u8 *iv,
178 const u8 *aad, unsigned long aad_len,
179 u8 *auth_tag, unsigned long auth_tag_len);
180
181static void aesni_gcm_enc_avx(void *ctx, u8 *out,
182 const u8 *in, unsigned long plaintext_len, u8 *iv,
183 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
184 u8 *auth_tag, unsigned long auth_tag_len)
185{
Timothy McCaffreye31ac322015-01-13 13:16:43 -0500186 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
 187 if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
Tim Chend7645932013-12-11 14:28:41 -0800188 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
189 aad_len, auth_tag, auth_tag_len);
190 } else {
191 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
192 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
193 aad_len, auth_tag, auth_tag_len);
194 }
195}
196
197static void aesni_gcm_dec_avx(void *ctx, u8 *out,
198 const u8 *in, unsigned long ciphertext_len, u8 *iv,
199 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
200 u8 *auth_tag, unsigned long auth_tag_len)
201{
Timothy McCaffreye31ac322015-01-13 13:16:43 -0500202 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
 203 if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
Tim Chend7645932013-12-11 14:28:41 -0800204 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
205 aad_len, auth_tag, auth_tag_len);
206 } else {
207 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
208 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
209 aad_len, auth_tag, auth_tag_len);
210 }
211}
212#endif
213
214#ifdef CONFIG_AS_AVX2
215/*
216 * asmlinkage void aesni_gcm_precomp_avx_gen4()
217 * gcm_data *my_ctx_data, context data
218 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
219 */
220asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
221
222asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
223 const u8 *in, unsigned long plaintext_len, u8 *iv,
224 const u8 *aad, unsigned long aad_len,
225 u8 *auth_tag, unsigned long auth_tag_len);
226
227asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
228 const u8 *in, unsigned long ciphertext_len, u8 *iv,
229 const u8 *aad, unsigned long aad_len,
230 u8 *auth_tag, unsigned long auth_tag_len);
231
232static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
233 const u8 *in, unsigned long plaintext_len, u8 *iv,
234 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
235 u8 *auth_tag, unsigned long auth_tag_len)
236{
Timothy McCaffreye31ac322015-01-13 13:16:43 -0500237 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
 238 if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
Tim Chend7645932013-12-11 14:28:41 -0800239 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
240 aad_len, auth_tag, auth_tag_len);
241 } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
242 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
243 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
244 aad_len, auth_tag, auth_tag_len);
245 } else {
246 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
247 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
248 aad_len, auth_tag, auth_tag_len);
249 }
250}
251
252static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
253 const u8 *in, unsigned long ciphertext_len, u8 *iv,
254 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
255 u8 *auth_tag, unsigned long auth_tag_len)
256{
Timothy McCaffreye31ac322015-01-13 13:16:43 -0500257 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
 258 if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
Tim Chend7645932013-12-11 14:28:41 -0800259 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
260 aad, aad_len, auth_tag, auth_tag_len);
261 } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
262 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
263 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
264 aad_len, auth_tag, auth_tag_len);
265 } else {
266 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
267 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
268 aad_len, auth_tag, auth_tag_len);
269 }
270}
271#endif
272
273static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
274 const u8 *in, unsigned long plaintext_len, u8 *iv,
275 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
276 u8 *auth_tag, unsigned long auth_tag_len);
277
278static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
279 const u8 *in, unsigned long ciphertext_len, u8 *iv,
280 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
281 u8 *auth_tag, unsigned long auth_tag_len);
282
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400283static inline struct
284aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
285{
286 return
287 (struct aesni_rfc4106_gcm_ctx *)
288 PTR_ALIGN((u8 *)
289 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
290}
Mathias Krause559ad0f2010-11-29 08:35:39 +0800291#endif
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400292
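/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for a tfm
 * context, but the AES-NI key schedule must start on a 16-byte boundary.
 * The algorithms below therefore over-allocate their context by
 * AESNI_ALIGN - 1 bytes and aes_ctx() realigns the raw pointer at run
 * time.
 */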
Huang Ying54b6a1b2009-01-18 16:28:34 +1100293static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
294{
295 unsigned long addr = (unsigned long)raw_ctx;
296 unsigned long align = AESNI_ALIGN;
297
298 if (align <= crypto_tfm_ctx_alignment())
299 align = 1;
300 return (struct crypto_aes_ctx *)ALIGN(addr, align);
301}
302
303static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
304 const u8 *in_key, unsigned int key_len)
305{
306 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
307 u32 *flags = &tfm->crt_flags;
308 int err;
309
310 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
311 key_len != AES_KEYSIZE_256) {
312 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
313 return -EINVAL;
314 }
315
Huang Ying13b79b972009-10-20 16:20:47 +0900316 if (!irq_fpu_usable())
Huang Ying54b6a1b2009-01-18 16:28:34 +1100317 err = crypto_aes_expand_key(ctx, in_key, key_len);
318 else {
319 kernel_fpu_begin();
320 err = aesni_set_key(ctx, in_key, key_len);
321 kernel_fpu_end();
322 }
323
324 return err;
325}
326
327static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
328 unsigned int key_len)
329{
330 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
331}
332
333static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
334{
335 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
336
Huang Ying13b79b972009-10-20 16:20:47 +0900337 if (!irq_fpu_usable())
Huang Ying54b6a1b2009-01-18 16:28:34 +1100338 crypto_aes_encrypt_x86(ctx, dst, src);
339 else {
340 kernel_fpu_begin();
341 aesni_enc(ctx, dst, src);
342 kernel_fpu_end();
343 }
344}
345
346static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
347{
348 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
349
Huang Ying13b79b972009-10-20 16:20:47 +0900350 if (!irq_fpu_usable())
Huang Ying54b6a1b2009-01-18 16:28:34 +1100351 crypto_aes_decrypt_x86(ctx, dst, src);
352 else {
353 kernel_fpu_begin();
354 aesni_dec(ctx, dst, src);
355 kernel_fpu_end();
356 }
357}
358
Huang Ying2cf4ac82009-03-29 15:41:20 +0800359static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
360{
361 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
362
363 aesni_enc(ctx, dst, src);
364}
365
366static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
367{
368 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
369
370 aesni_dec(ctx, dst, src);
371}
372
Huang Ying54b6a1b2009-01-18 16:28:34 +1100373static int ecb_encrypt(struct blkcipher_desc *desc,
374 struct scatterlist *dst, struct scatterlist *src,
375 unsigned int nbytes)
376{
377 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
378 struct blkcipher_walk walk;
379 int err;
380
381 blkcipher_walk_init(&walk, dst, src, nbytes);
382 err = blkcipher_walk_virt(desc, &walk);
Huang Ying9251b642009-06-18 19:41:27 +0800383 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
Huang Ying54b6a1b2009-01-18 16:28:34 +1100384
385 kernel_fpu_begin();
386 while ((nbytes = walk.nbytes)) {
387 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
388 nbytes & AES_BLOCK_MASK);
389 nbytes &= AES_BLOCK_SIZE - 1;
390 err = blkcipher_walk_done(desc, &walk, nbytes);
391 }
392 kernel_fpu_end();
393
394 return err;
395}
396
397static int ecb_decrypt(struct blkcipher_desc *desc,
398 struct scatterlist *dst, struct scatterlist *src,
399 unsigned int nbytes)
400{
401 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
402 struct blkcipher_walk walk;
403 int err;
404
405 blkcipher_walk_init(&walk, dst, src, nbytes);
406 err = blkcipher_walk_virt(desc, &walk);
Huang Ying9251b642009-06-18 19:41:27 +0800407 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
Huang Ying54b6a1b2009-01-18 16:28:34 +1100408
409 kernel_fpu_begin();
410 while ((nbytes = walk.nbytes)) {
411 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
412 nbytes & AES_BLOCK_MASK);
413 nbytes &= AES_BLOCK_SIZE - 1;
414 err = blkcipher_walk_done(desc, &walk, nbytes);
415 }
416 kernel_fpu_end();
417
418 return err;
419}
420
Huang Ying54b6a1b2009-01-18 16:28:34 +1100421static int cbc_encrypt(struct blkcipher_desc *desc,
422 struct scatterlist *dst, struct scatterlist *src,
423 unsigned int nbytes)
424{
425 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
426 struct blkcipher_walk walk;
427 int err;
428
429 blkcipher_walk_init(&walk, dst, src, nbytes);
430 err = blkcipher_walk_virt(desc, &walk);
Huang Ying9251b642009-06-18 19:41:27 +0800431 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
Huang Ying54b6a1b2009-01-18 16:28:34 +1100432
433 kernel_fpu_begin();
434 while ((nbytes = walk.nbytes)) {
435 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
436 nbytes & AES_BLOCK_MASK, walk.iv);
437 nbytes &= AES_BLOCK_SIZE - 1;
438 err = blkcipher_walk_done(desc, &walk, nbytes);
439 }
440 kernel_fpu_end();
441
442 return err;
443}
444
445static int cbc_decrypt(struct blkcipher_desc *desc,
446 struct scatterlist *dst, struct scatterlist *src,
447 unsigned int nbytes)
448{
449 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
450 struct blkcipher_walk walk;
451 int err;
452
453 blkcipher_walk_init(&walk, dst, src, nbytes);
454 err = blkcipher_walk_virt(desc, &walk);
Huang Ying9251b642009-06-18 19:41:27 +0800455 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
Huang Ying54b6a1b2009-01-18 16:28:34 +1100456
457 kernel_fpu_begin();
458 while ((nbytes = walk.nbytes)) {
459 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
460 nbytes & AES_BLOCK_MASK, walk.iv);
461 nbytes &= AES_BLOCK_SIZE - 1;
462 err = blkcipher_walk_done(desc, &walk, nbytes);
463 }
464 kernel_fpu_end();
465
466 return err;
467}
468
Mathias Krause0d258ef2010-11-27 16:34:46 +0800469#ifdef CONFIG_X86_64
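/*
 * Handle the final, partial CTR block: encrypt one counter block into a
 * keystream buffer, XOR it over the remaining nbytes (< AES_BLOCK_SIZE)
 * and advance the counter.
 */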
Huang Ying12387a42010-03-10 18:28:55 +0800470static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
471 struct blkcipher_walk *walk)
472{
473 u8 *ctrblk = walk->iv;
474 u8 keystream[AES_BLOCK_SIZE];
475 u8 *src = walk->src.virt.addr;
476 u8 *dst = walk->dst.virt.addr;
477 unsigned int nbytes = walk->nbytes;
478
479 aesni_enc(ctx, keystream, ctrblk);
480 crypto_xor(keystream, src, nbytes);
481 memcpy(dst, keystream, nbytes);
482 crypto_inc(ctrblk, AES_BLOCK_SIZE);
483}
484
Mathias Krause5cfed7b2014-09-28 22:24:01 +0200485#ifdef CONFIG_AS_AVX
chandramouli narayanan22cddcc2014-06-10 09:22:47 -0700486static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
487 const u8 *in, unsigned int len, u8 *iv)
488{
489 /*
 490 * Based on the key length, override with the by8 version
 491 * of CTR mode encryption/decryption for improved performance.
 492 * aes_set_key_common() ensures that the key length is one of
 493 * {128, 192, 256} bits.
494 */
495 if (ctx->key_length == AES_KEYSIZE_128)
496 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
497 else if (ctx->key_length == AES_KEYSIZE_192)
498 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
499 else
500 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
501}
502#endif
503
Huang Ying12387a42010-03-10 18:28:55 +0800504static int ctr_crypt(struct blkcipher_desc *desc,
505 struct scatterlist *dst, struct scatterlist *src,
506 unsigned int nbytes)
507{
508 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
509 struct blkcipher_walk walk;
510 int err;
511
512 blkcipher_walk_init(&walk, dst, src, nbytes);
513 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
514 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
515
516 kernel_fpu_begin();
517 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
chandramouli narayanan22cddcc2014-06-10 09:22:47 -0700518 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
Timothy McCaffreye31ac322015-01-13 13:16:43 -0500519 nbytes & AES_BLOCK_MASK, walk.iv);
Huang Ying12387a42010-03-10 18:28:55 +0800520 nbytes &= AES_BLOCK_SIZE - 1;
521 err = blkcipher_walk_done(desc, &walk, nbytes);
522 }
523 if (walk.nbytes) {
524 ctr_crypt_final(ctx, &walk);
525 err = blkcipher_walk_done(desc, &walk, 0);
526 }
527 kernel_fpu_end();
528
529 return err;
530}
Mathias Krause0d258ef2010-11-27 16:34:46 +0800531#endif
Huang Ying12387a42010-03-10 18:28:55 +0800532
Huang Ying54b6a1b2009-01-18 16:28:34 +1100533static int ablk_ecb_init(struct crypto_tfm *tfm)
534{
Jussi Kivilinnaef45b832012-05-11 16:00:54 +0300535 return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
Huang Ying54b6a1b2009-01-18 16:28:34 +1100536}
537
Huang Ying54b6a1b2009-01-18 16:28:34 +1100538static int ablk_cbc_init(struct crypto_tfm *tfm)
539{
Jussi Kivilinnaef45b832012-05-11 16:00:54 +0300540 return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
Huang Ying54b6a1b2009-01-18 16:28:34 +1100541}
542
Mathias Krause0d258ef2010-11-27 16:34:46 +0800543#ifdef CONFIG_X86_64
Huang Ying2cf4ac82009-03-29 15:41:20 +0800544static int ablk_ctr_init(struct crypto_tfm *tfm)
545{
Jussi Kivilinnaef45b832012-05-11 16:00:54 +0300546 return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
Huang Ying2cf4ac82009-03-29 15:41:20 +0800547}
548
Mathias Krause0d258ef2010-11-27 16:34:46 +0800549#endif
Huang Ying2cf4ac82009-03-29 15:41:20 +0800550
Valentin Rothberg304576a2014-10-21 11:35:32 +0200551#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
Huang Ying2cf4ac82009-03-29 15:41:20 +0800552static int ablk_pcbc_init(struct crypto_tfm *tfm)
553{
Jussi Kivilinnaef45b832012-05-11 16:00:54 +0300554 return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
Huang Ying2cf4ac82009-03-29 15:41:20 +0800555}
Huang Ying2cf4ac82009-03-29 15:41:20 +0800556#endif
557
Jussi Kivilinna023af602012-07-22 18:18:37 +0300558static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
Huang Ying2cf4ac82009-03-29 15:41:20 +0800559{
Jussi Kivilinna023af602012-07-22 18:18:37 +0300560 aesni_ecb_enc(ctx, blks, blks, nbytes);
Huang Ying2cf4ac82009-03-29 15:41:20 +0800561}
Jussi Kivilinna023af602012-07-22 18:18:37 +0300562
563static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
564{
565 aesni_ecb_dec(ctx, blks, blks, nbytes);
566}
567
568static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
569 unsigned int keylen)
570{
571 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
572 int err;
573
574 err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
575 keylen - AES_BLOCK_SIZE);
576 if (err)
577 return err;
578
579 return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
580}
581
582static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
583{
584 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
585
586 lrw_free_table(&ctx->lrw_table);
587}
588
589static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
590 struct scatterlist *src, unsigned int nbytes)
591{
592 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
593 be128 buf[8];
594 struct lrw_crypt_req req = {
595 .tbuf = buf,
596 .tbuflen = sizeof(buf),
597
598 .table_ctx = &ctx->lrw_table,
599 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
600 .crypt_fn = lrw_xts_encrypt_callback,
601 };
602 int ret;
603
604 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
605
606 kernel_fpu_begin();
607 ret = lrw_crypt(desc, dst, src, nbytes, &req);
608 kernel_fpu_end();
609
610 return ret;
611}
612
613static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
614 struct scatterlist *src, unsigned int nbytes)
615{
616 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
617 be128 buf[8];
618 struct lrw_crypt_req req = {
619 .tbuf = buf,
620 .tbuflen = sizeof(buf),
621
622 .table_ctx = &ctx->lrw_table,
623 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
624 .crypt_fn = lrw_xts_decrypt_callback,
625 };
626 int ret;
627
628 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
629
630 kernel_fpu_begin();
631 ret = lrw_crypt(desc, dst, src, nbytes, &req);
632 kernel_fpu_end();
633
634 return ret;
635}
636
637static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
638 unsigned int keylen)
639{
640 struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
641 u32 *flags = &tfm->crt_flags;
642 int err;
643
644 /* key consists of keys of equal size concatenated, therefore
645 * the length must be even
646 */
647 if (keylen % 2) {
648 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
649 return -EINVAL;
650 }
651
652 /* first half of xts-key is for crypt */
653 err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
654 if (err)
655 return err;
656
657 /* second half of xts-key is for tweak */
658 return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
659 keylen / 2);
660}
661
662
Jussi Kivilinna32bec972012-10-18 23:24:57 +0300663static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
664{
665 aesni_enc(ctx, out, in);
666}
667
Jussi Kivilinnac456a9c2013-04-08 21:51:16 +0300668#ifdef CONFIG_X86_64
669
670static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
671{
672 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
673}
674
675static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
676{
677 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
678}
679
680static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
681{
682 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
683}
684
685static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
686{
687 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
688}
689
690static const struct common_glue_ctx aesni_enc_xts = {
691 .num_funcs = 2,
692 .fpu_blocks_limit = 1,
693
694 .funcs = { {
695 .num_blocks = 8,
696 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
697 }, {
698 .num_blocks = 1,
699 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
700 } }
701};
702
703static const struct common_glue_ctx aesni_dec_xts = {
704 .num_funcs = 2,
705 .fpu_blocks_limit = 1,
706
707 .funcs = { {
708 .num_blocks = 8,
709 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
710 }, {
711 .num_blocks = 1,
712 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
713 } }
714};
715
716static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
717 struct scatterlist *src, unsigned int nbytes)
718{
719 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
720
721 return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
722 XTS_TWEAK_CAST(aesni_xts_tweak),
723 aes_ctx(ctx->raw_tweak_ctx),
724 aes_ctx(ctx->raw_crypt_ctx));
725}
726
727static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
728 struct scatterlist *src, unsigned int nbytes)
729{
730 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
731
732 return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
733 XTS_TWEAK_CAST(aesni_xts_tweak),
734 aes_ctx(ctx->raw_tweak_ctx),
735 aes_ctx(ctx->raw_crypt_ctx));
736}
737
738#else
739
Jussi Kivilinna023af602012-07-22 18:18:37 +0300740static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
741 struct scatterlist *src, unsigned int nbytes)
742{
743 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
744 be128 buf[8];
745 struct xts_crypt_req req = {
746 .tbuf = buf,
747 .tbuflen = sizeof(buf),
748
749 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
Jussi Kivilinna32bec972012-10-18 23:24:57 +0300750 .tweak_fn = aesni_xts_tweak,
Jussi Kivilinna023af602012-07-22 18:18:37 +0300751 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
752 .crypt_fn = lrw_xts_encrypt_callback,
753 };
754 int ret;
755
756 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
757
758 kernel_fpu_begin();
759 ret = xts_crypt(desc, dst, src, nbytes, &req);
760 kernel_fpu_end();
761
762 return ret;
763}
764
765static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
766 struct scatterlist *src, unsigned int nbytes)
767{
768 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
769 be128 buf[8];
770 struct xts_crypt_req req = {
771 .tbuf = buf,
772 .tbuflen = sizeof(buf),
773
774 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
Jussi Kivilinna32bec972012-10-18 23:24:57 +0300775 .tweak_fn = aesni_xts_tweak,
Jussi Kivilinna023af602012-07-22 18:18:37 +0300776 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
777 .crypt_fn = lrw_xts_decrypt_callback,
778 };
779 int ret;
780
781 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
782
783 kernel_fpu_begin();
784 ret = xts_crypt(desc, dst, src, nbytes, &req);
785 kernel_fpu_end();
786
787 return ret;
788}
Huang Ying2cf4ac82009-03-29 15:41:20 +0800789
Jussi Kivilinnac456a9c2013-04-08 21:51:16 +0300790#endif
791
Mathias Krause559ad0f2010-11-29 08:35:39 +0800792#ifdef CONFIG_X86_64
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400793static int rfc4106_init(struct crypto_tfm *tfm)
794{
795 struct cryptd_aead *cryptd_tfm;
796 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
797 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
Tadeusz Struk60af5202011-03-13 16:56:17 +0800798 struct crypto_aead *cryptd_child;
799 struct aesni_rfc4106_gcm_ctx *child_ctx;
Stephan Muellereabdc322015-03-30 21:58:17 +0200800 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
801 CRYPTO_ALG_INTERNAL,
802 CRYPTO_ALG_INTERNAL);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400803 if (IS_ERR(cryptd_tfm))
804 return PTR_ERR(cryptd_tfm);
Tadeusz Struk60af5202011-03-13 16:56:17 +0800805
806 cryptd_child = cryptd_aead_child(cryptd_tfm);
807 child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
808 memcpy(child_ctx, ctx, sizeof(*ctx));
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400809 ctx->cryptd_tfm = cryptd_tfm;
Herbert Xua5a2b4d2015-05-11 17:48:04 +0800810 crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
811 sizeof(struct aead_request) +
812 crypto_aead_reqsize(&cryptd_tfm->base));
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400813 return 0;
814}
815
816static void rfc4106_exit(struct crypto_tfm *tfm)
817{
818 struct aesni_rfc4106_gcm_ctx *ctx =
819 (struct aesni_rfc4106_gcm_ctx *)
820 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
821 if (!IS_ERR(ctx->cryptd_tfm))
822 cryptd_free_aead(ctx->cryptd_tfm);
823 return;
824}
825
826static void
827rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
828{
829 struct aesni_gcm_set_hash_subkey_result *result = req->data;
830
831 if (err == -EINPROGRESS)
832 return;
833 result->err = err;
834 complete(&result->completion);
835}
836
837static int
838rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
839{
840 struct crypto_ablkcipher *ctr_tfm;
841 struct ablkcipher_request *req;
842 int ret = -EINVAL;
843 struct aesni_hash_subkey_req_data *req_data;
844
845 ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
846 if (IS_ERR(ctr_tfm))
847 return PTR_ERR(ctr_tfm);
848
849 crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
850
851 ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
Jesper Juhl7efd95f62011-01-23 18:56:36 +1100852 if (ret)
Jesper Juhl7efd95f62011-01-23 18:56:36 +1100853 goto out_free_ablkcipher;
Jesper Juhlfc9044e22011-02-16 13:04:09 +1100854
855 ret = -ENOMEM;
856 req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
857 if (!req)
858 goto out_free_ablkcipher;
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400859
860 req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
Jesper Juhlfc9044e22011-02-16 13:04:09 +1100861 if (!req_data)
Jesper Juhl7efd95f62011-01-23 18:56:36 +1100862 goto out_free_request;
Jesper Juhlfc9044e22011-02-16 13:04:09 +1100863
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400864 memset(req_data->iv, 0, sizeof(req_data->iv));
865
 866 /* Clear the data in the hash sub key container to zero. */
 867 /* Ciphering all zeros creates the hash sub key (GHASH key H = E_K(0^128)). */
868 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
869
870 init_completion(&req_data->result.completion);
871 sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
872 ablkcipher_request_set_tfm(req, ctr_tfm);
873 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
874 CRYPTO_TFM_REQ_MAY_BACKLOG,
875 rfc4106_set_hash_subkey_done,
876 &req_data->result);
877
878 ablkcipher_request_set_crypt(req, &req_data->sg,
879 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
880
881 ret = crypto_ablkcipher_encrypt(req);
882 if (ret == -EINPROGRESS || ret == -EBUSY) {
883 ret = wait_for_completion_interruptible
884 (&req_data->result.completion);
885 if (!ret)
886 ret = req_data->result.err;
887 }
Jesper Juhlfc9044e22011-02-16 13:04:09 +1100888 kfree(req_data);
Jesper Juhl7efd95f62011-01-23 18:56:36 +1100889out_free_request:
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400890 ablkcipher_request_free(req);
Jesper Juhl7efd95f62011-01-23 18:56:36 +1100891out_free_ablkcipher:
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400892 crypto_free_ablkcipher(ctr_tfm);
893 return ret;
894}
895
Tadeusz Struk81e397d2015-02-06 10:25:20 -0800896static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
897 unsigned int key_len)
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400898{
899 int ret = 0;
Tadeusz Struk81e397d2015-02-06 10:25:20 -0800900 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
901 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
Milan Brozbf084d82012-06-28 17:26:02 +0200902 u8 *new_key_align, *new_key_mem = NULL;
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400903
904 if (key_len < 4) {
905 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
906 return -EINVAL;
907 }
 908 /* Account for the 4-byte nonce at the end. */
909 key_len -= 4;
Timothy McCaffreye31ac322015-01-13 13:16:43 -0500910 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
911 key_len != AES_KEYSIZE_256) {
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400912 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
913 return -EINVAL;
914 }
915
916 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
 917 /* This must be on a 16-byte boundary! */
918 if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
919 return -EINVAL;
920
921 if ((unsigned long)key % AESNI_ALIGN) {
 922 /* key is not aligned: use an auxiliary aligned pointer */
923 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
924 if (!new_key_mem)
925 return -ENOMEM;
926
Milan Brozbf084d82012-06-28 17:26:02 +0200927 new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
928 memcpy(new_key_align, key, key_len);
929 key = new_key_align;
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400930 }
931
932 if (!irq_fpu_usable())
933 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
934 key, key_len);
935 else {
936 kernel_fpu_begin();
937 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
938 kernel_fpu_end();
939 }
 940 /* This must be on a 16-byte boundary! */
941 if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
942 ret = -EINVAL;
943 goto exit;
944 }
945 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
946exit:
947 kfree(new_key_mem);
948 return ret;
949}
950
Tadeusz Struk81e397d2015-02-06 10:25:20 -0800951static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
952 unsigned int key_len)
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400953{
954 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
Tadeusz Struk81e397d2015-02-06 10:25:20 -0800955 struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
956 struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child);
957 struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm;
958 int ret;
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400959
Tadeusz Struk81e397d2015-02-06 10:25:20 -0800960 ret = crypto_aead_setkey(child, key, key_len);
961 if (!ret) {
962 memcpy(ctx, c_ctx, sizeof(*ctx));
963 ctx->cryptd_tfm = cryptd_tfm;
964 }
965 return ret;
966}
967
968static int common_rfc4106_set_authsize(struct crypto_aead *aead,
969 unsigned int authsize)
970{
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400971 switch (authsize) {
972 case 8:
973 case 12:
974 case 16:
975 break;
976 default:
977 return -EINVAL;
978 }
Tadeusz Struk81e397d2015-02-06 10:25:20 -0800979 crypto_aead_crt(aead)->authsize = authsize;
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400980 return 0;
981}
982
Tadeusz Struk81e397d2015-02-06 10:25:20 -0800983/* This is the Integrity Check Value (aka the authentication tag) length; it can
984 * be 8, 12 or 16 bytes long. */
985static int rfc4106_set_authsize(struct crypto_aead *parent,
986 unsigned int authsize)
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400987{
Tadeusz Struk81e397d2015-02-06 10:25:20 -0800988 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
989 struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400990 int ret;
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400991
Tadeusz Struk81e397d2015-02-06 10:25:20 -0800992 ret = crypto_aead_setauthsize(child, authsize);
993 if (!ret)
994 crypto_aead_crt(parent)->authsize = authsize;
995 return ret;
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400996}
997
Tadeusz Struk0bd82f52010-11-04 15:00:45 -0400998static int __driver_rfc4106_encrypt(struct aead_request *req)
999{
1000 u8 one_entry_in_sg = 0;
1001 u8 *src, *dst, *assoc;
1002 __be32 counter = cpu_to_be32(1);
1003 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1004 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
Timothy McCaffreye31ac322015-01-13 13:16:43 -05001005 u32 key_len = ctx->aes_key_expanded.key_length;
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001006 void *aes_ctx = &(ctx->aes_key_expanded);
1007 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1008 u8 iv_tab[16+AESNI_ALIGN];
 1009 u8 *iv = (u8 *)PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1010 struct scatter_walk src_sg_walk;
1011 struct scatter_walk assoc_sg_walk;
1012 struct scatter_walk dst_sg_walk;
1013 unsigned int i;
1014
1015 /* Assuming we are supporting rfc4106 64-bit extended */
 1016 /* sequence numbers, we need the AAD length to be equal */
 1017 /* to 8 or 12 bytes. */
1018 if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1019 return -EINVAL;
Timothy McCaffreye31ac322015-01-13 13:16:43 -05001020 if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
1021 return -EINVAL;
1022 if (unlikely(key_len != AES_KEYSIZE_128 &&
1023 key_len != AES_KEYSIZE_192 &&
1024 key_len != AES_KEYSIZE_256))
1025 return -EINVAL;
1026
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001027 /* Build the IV (pre-counter block j0): salt | explicit IV | 0x00000001 */
1028 for (i = 0; i < 4; i++)
1029 *(iv+i) = ctx->nonce[i];
1030 for (i = 0; i < 8; i++)
1031 *(iv+4+i) = req->iv[i];
1032 *((__be32 *)(iv+12)) = counter;
1033
1034 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1035 one_entry_in_sg = 1;
1036 scatterwalk_start(&src_sg_walk, req->src);
1037 scatterwalk_start(&assoc_sg_walk, req->assoc);
Cong Wang8fd75e12011-11-25 23:14:17 +08001038 src = scatterwalk_map(&src_sg_walk);
1039 assoc = scatterwalk_map(&assoc_sg_walk);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001040 dst = src;
1041 if (unlikely(req->src != req->dst)) {
1042 scatterwalk_start(&dst_sg_walk, req->dst);
Cong Wang8fd75e12011-11-25 23:14:17 +08001043 dst = scatterwalk_map(&dst_sg_walk);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001044 }
1045
1046 } else {
1047 /* Allocate memory for src, dst, assoc */
1048 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1049 GFP_ATOMIC);
1050 if (unlikely(!src))
1051 return -ENOMEM;
1052 assoc = (src + req->cryptlen + auth_tag_len);
1053 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1054 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1055 req->assoclen, 0);
1056 dst = src;
1057 }
1058
Tim Chend7645932013-12-11 14:28:41 -08001059 aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001060 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1061 + ((unsigned long)req->cryptlen), auth_tag_len);
1062
1063 /* The authTag (aka the Integrity Check Value) needs to be written
1064 * back to the packet. */
1065 if (one_entry_in_sg) {
1066 if (unlikely(req->src != req->dst)) {
Cong Wang8fd75e12011-11-25 23:14:17 +08001067 scatterwalk_unmap(dst);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001068 scatterwalk_done(&dst_sg_walk, 0, 0);
1069 }
Cong Wang8fd75e12011-11-25 23:14:17 +08001070 scatterwalk_unmap(src);
1071 scatterwalk_unmap(assoc);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001072 scatterwalk_done(&src_sg_walk, 0, 0);
1073 scatterwalk_done(&assoc_sg_walk, 0, 0);
1074 } else {
1075 scatterwalk_map_and_copy(dst, req->dst, 0,
1076 req->cryptlen + auth_tag_len, 1);
1077 kfree(src);
1078 }
1079 return 0;
1080}
1081
1082static int __driver_rfc4106_decrypt(struct aead_request *req)
1083{
1084 u8 one_entry_in_sg = 0;
1085 u8 *src, *dst, *assoc;
1086 unsigned long tempCipherLen = 0;
1087 __be32 counter = cpu_to_be32(1);
1088 int retval = 0;
1089 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1090 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
Timothy McCaffreye31ac322015-01-13 13:16:43 -05001091 u32 key_len = ctx->aes_key_expanded.key_length;
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001092 void *aes_ctx = &(ctx->aes_key_expanded);
1093 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1094 u8 iv_and_authTag[32+AESNI_ALIGN];
1095 u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1096 u8 *authTag = iv + 16;
1097 struct scatter_walk src_sg_walk;
1098 struct scatter_walk assoc_sg_walk;
1099 struct scatter_walk dst_sg_walk;
1100 unsigned int i;
1101
1102 if (unlikely((req->cryptlen < auth_tag_len) ||
1103 (req->assoclen != 8 && req->assoclen != 12)))
1104 return -EINVAL;
Timothy McCaffreye31ac322015-01-13 13:16:43 -05001105 if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
1106 return -EINVAL;
1107 if (unlikely(key_len != AES_KEYSIZE_128 &&
1108 key_len != AES_KEYSIZE_192 &&
1109 key_len != AES_KEYSIZE_256))
1110 return -EINVAL;
1111
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001112 /* Assuming we are supporting rfc4106 64-bit extended */
 1113 /* sequence numbers, we need the AAD length to be */
 1114 /* equal to 8 or 12 bytes. */
1115
1116 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
 1117 /* Build the IV (pre-counter block j0): salt | explicit IV | 0x00000001 */
1118 for (i = 0; i < 4; i++)
1119 *(iv+i) = ctx->nonce[i];
1120 for (i = 0; i < 8; i++)
1121 *(iv+4+i) = req->iv[i];
1122 *((__be32 *)(iv+12)) = counter;
1123
1124 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1125 one_entry_in_sg = 1;
1126 scatterwalk_start(&src_sg_walk, req->src);
1127 scatterwalk_start(&assoc_sg_walk, req->assoc);
Cong Wang8fd75e12011-11-25 23:14:17 +08001128 src = scatterwalk_map(&src_sg_walk);
1129 assoc = scatterwalk_map(&assoc_sg_walk);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001130 dst = src;
1131 if (unlikely(req->src != req->dst)) {
1132 scatterwalk_start(&dst_sg_walk, req->dst);
Cong Wang8fd75e12011-11-25 23:14:17 +08001133 dst = scatterwalk_map(&dst_sg_walk);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001134 }
1135
1136 } else {
1137 /* Allocate memory for src, dst, assoc */
1138 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1139 if (!src)
1140 return -ENOMEM;
Stephan Muellerccfe8c32015-03-12 09:17:51 +01001141 assoc = (src + req->cryptlen);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001142 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1143 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1144 req->assoclen, 0);
1145 dst = src;
1146 }
1147
Tim Chend7645932013-12-11 14:28:41 -08001148 aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001149 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1150 authTag, auth_tag_len);
1151
1152 /* Compare generated tag with passed in tag. */
Daniel Borkmannfed28612013-12-11 11:28:59 +01001153 retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001154 -EBADMSG : 0;
1155
1156 if (one_entry_in_sg) {
1157 if (unlikely(req->src != req->dst)) {
Cong Wang8fd75e12011-11-25 23:14:17 +08001158 scatterwalk_unmap(dst);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001159 scatterwalk_done(&dst_sg_walk, 0, 0);
1160 }
Cong Wang8fd75e12011-11-25 23:14:17 +08001161 scatterwalk_unmap(src);
1162 scatterwalk_unmap(assoc);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001163 scatterwalk_done(&src_sg_walk, 0, 0);
1164 scatterwalk_done(&assoc_sg_walk, 0, 0);
1165 } else {
Stephan Muellerccfe8c32015-03-12 09:17:51 +01001166 scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001167 kfree(src);
1168 }
1169 return retval;
1170}
Tadeusz Struk81e397d2015-02-06 10:25:20 -08001171
1172static int rfc4106_encrypt(struct aead_request *req)
1173{
1174 int ret;
1175 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1176 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1177
1178 if (!irq_fpu_usable()) {
1179 struct aead_request *cryptd_req =
1180 (struct aead_request *) aead_request_ctx(req);
1181
1182 memcpy(cryptd_req, req, sizeof(*req));
1183 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1184 ret = crypto_aead_encrypt(cryptd_req);
1185 } else {
1186 kernel_fpu_begin();
1187 ret = __driver_rfc4106_encrypt(req);
1188 kernel_fpu_end();
1189 }
1190 return ret;
1191}
1192
1193static int rfc4106_decrypt(struct aead_request *req)
1194{
1195 int ret;
1196 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1197 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1198
1199 if (!irq_fpu_usable()) {
1200 struct aead_request *cryptd_req =
1201 (struct aead_request *) aead_request_ctx(req);
1202
1203 memcpy(cryptd_req, req, sizeof(*req));
1204 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1205 ret = crypto_aead_decrypt(cryptd_req);
1206 } else {
1207 kernel_fpu_begin();
1208 ret = __driver_rfc4106_decrypt(req);
1209 kernel_fpu_end();
1210 }
1211 return ret;
1212}
1213
1214static int helper_rfc4106_encrypt(struct aead_request *req)
1215{
1216 int ret;
1217
1218 if (unlikely(!irq_fpu_usable())) {
1219 WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
1220 ret = -EINVAL;
1221 } else {
1222 kernel_fpu_begin();
1223 ret = __driver_rfc4106_encrypt(req);
1224 kernel_fpu_end();
1225 }
1226 return ret;
1227}
1228
1229static int helper_rfc4106_decrypt(struct aead_request *req)
1230{
1231 int ret;
1232
1233 if (unlikely(!irq_fpu_usable())) {
1234 WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
1235 ret = -EINVAL;
1236 } else {
1237 kernel_fpu_begin();
1238 ret = __driver_rfc4106_decrypt(req);
1239 kernel_fpu_end();
1240 }
1241 return ret;
1242}
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001243#endif
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001244
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001245static struct crypto_alg aesni_algs[] = { {
1246 .cra_name = "aes",
1247 .cra_driver_name = "aes-aesni",
1248 .cra_priority = 300,
1249 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
1250 .cra_blocksize = AES_BLOCK_SIZE,
1251 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1252 AESNI_ALIGN - 1,
1253 .cra_alignmask = 0,
1254 .cra_module = THIS_MODULE,
1255 .cra_u = {
1256 .cipher = {
1257 .cia_min_keysize = AES_MIN_KEY_SIZE,
1258 .cia_max_keysize = AES_MAX_KEY_SIZE,
1259 .cia_setkey = aes_set_key,
1260 .cia_encrypt = aes_encrypt,
1261 .cia_decrypt = aes_decrypt
1262 }
1263 }
1264}, {
1265 .cra_name = "__aes-aesni",
1266 .cra_driver_name = "__driver-aes-aesni",
1267 .cra_priority = 0,
Stephan Muellereabdc322015-03-30 21:58:17 +02001268 .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001269 .cra_blocksize = AES_BLOCK_SIZE,
1270 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1271 AESNI_ALIGN - 1,
1272 .cra_alignmask = 0,
1273 .cra_module = THIS_MODULE,
1274 .cra_u = {
1275 .cipher = {
1276 .cia_min_keysize = AES_MIN_KEY_SIZE,
1277 .cia_max_keysize = AES_MAX_KEY_SIZE,
1278 .cia_setkey = aes_set_key,
1279 .cia_encrypt = __aes_encrypt,
1280 .cia_decrypt = __aes_decrypt
1281 }
1282 }
1283}, {
1284 .cra_name = "__ecb-aes-aesni",
1285 .cra_driver_name = "__driver-ecb-aes-aesni",
1286 .cra_priority = 0,
Stephan Muellereabdc322015-03-30 21:58:17 +02001287 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
1288 CRYPTO_ALG_INTERNAL,
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001289 .cra_blocksize = AES_BLOCK_SIZE,
1290 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1291 AESNI_ALIGN - 1,
1292 .cra_alignmask = 0,
1293 .cra_type = &crypto_blkcipher_type,
1294 .cra_module = THIS_MODULE,
1295 .cra_u = {
1296 .blkcipher = {
1297 .min_keysize = AES_MIN_KEY_SIZE,
1298 .max_keysize = AES_MAX_KEY_SIZE,
1299 .setkey = aes_set_key,
1300 .encrypt = ecb_encrypt,
1301 .decrypt = ecb_decrypt,
1302 },
1303 },
1304}, {
1305 .cra_name = "__cbc-aes-aesni",
1306 .cra_driver_name = "__driver-cbc-aes-aesni",
1307 .cra_priority = 0,
Stephan Muellereabdc322015-03-30 21:58:17 +02001308 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
1309 CRYPTO_ALG_INTERNAL,
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001310 .cra_blocksize = AES_BLOCK_SIZE,
1311 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1312 AESNI_ALIGN - 1,
1313 .cra_alignmask = 0,
1314 .cra_type = &crypto_blkcipher_type,
1315 .cra_module = THIS_MODULE,
1316 .cra_u = {
1317 .blkcipher = {
1318 .min_keysize = AES_MIN_KEY_SIZE,
1319 .max_keysize = AES_MAX_KEY_SIZE,
1320 .setkey = aes_set_key,
1321 .encrypt = cbc_encrypt,
1322 .decrypt = cbc_decrypt,
1323 },
1324 },
1325}, {
1326 .cra_name = "ecb(aes)",
1327 .cra_driver_name = "ecb-aes-aesni",
1328 .cra_priority = 400,
1329 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1330 .cra_blocksize = AES_BLOCK_SIZE,
Jussi Kivilinnaa9629d72012-06-18 14:07:08 +03001331 .cra_ctxsize = sizeof(struct async_helper_ctx),
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001332 .cra_alignmask = 0,
1333 .cra_type = &crypto_ablkcipher_type,
1334 .cra_module = THIS_MODULE,
1335 .cra_init = ablk_ecb_init,
1336 .cra_exit = ablk_exit,
1337 .cra_u = {
1338 .ablkcipher = {
1339 .min_keysize = AES_MIN_KEY_SIZE,
1340 .max_keysize = AES_MAX_KEY_SIZE,
1341 .setkey = ablk_set_key,
1342 .encrypt = ablk_encrypt,
1343 .decrypt = ablk_decrypt,
1344 },
1345 },
1346}, {
1347 .cra_name = "cbc(aes)",
1348 .cra_driver_name = "cbc-aes-aesni",
1349 .cra_priority = 400,
1350 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1351 .cra_blocksize = AES_BLOCK_SIZE,
Jussi Kivilinnaa9629d72012-06-18 14:07:08 +03001352 .cra_ctxsize = sizeof(struct async_helper_ctx),
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001353 .cra_alignmask = 0,
1354 .cra_type = &crypto_ablkcipher_type,
1355 .cra_module = THIS_MODULE,
1356 .cra_init = ablk_cbc_init,
1357 .cra_exit = ablk_exit,
1358 .cra_u = {
1359 .ablkcipher = {
1360 .min_keysize = AES_MIN_KEY_SIZE,
1361 .max_keysize = AES_MAX_KEY_SIZE,
1362 .ivsize = AES_BLOCK_SIZE,
1363 .setkey = ablk_set_key,
1364 .encrypt = ablk_encrypt,
1365 .decrypt = ablk_decrypt,
1366 },
1367 },
1368#ifdef CONFIG_X86_64
1369}, {
1370 .cra_name = "__ctr-aes-aesni",
1371 .cra_driver_name = "__driver-ctr-aes-aesni",
1372 .cra_priority = 0,
Stephan Muellereabdc322015-03-30 21:58:17 +02001373 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
1374 CRYPTO_ALG_INTERNAL,
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001375 .cra_blocksize = 1,
1376 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1377 AESNI_ALIGN - 1,
1378 .cra_alignmask = 0,
1379 .cra_type = &crypto_blkcipher_type,
1380 .cra_module = THIS_MODULE,
1381 .cra_u = {
1382 .blkcipher = {
1383 .min_keysize = AES_MIN_KEY_SIZE,
1384 .max_keysize = AES_MAX_KEY_SIZE,
1385 .ivsize = AES_BLOCK_SIZE,
1386 .setkey = aes_set_key,
1387 .encrypt = ctr_crypt,
1388 .decrypt = ctr_crypt,
1389 },
1390 },
1391}, {
1392 .cra_name = "ctr(aes)",
1393 .cra_driver_name = "ctr-aes-aesni",
1394 .cra_priority = 400,
1395 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1396 .cra_blocksize = 1,
Jussi Kivilinnaa9629d72012-06-18 14:07:08 +03001397 .cra_ctxsize = sizeof(struct async_helper_ctx),
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001398 .cra_alignmask = 0,
1399 .cra_type = &crypto_ablkcipher_type,
1400 .cra_module = THIS_MODULE,
1401 .cra_init = ablk_ctr_init,
1402 .cra_exit = ablk_exit,
1403 .cra_u = {
1404 .ablkcipher = {
1405 .min_keysize = AES_MIN_KEY_SIZE,
1406 .max_keysize = AES_MAX_KEY_SIZE,
1407 .ivsize = AES_BLOCK_SIZE,
1408 .setkey = ablk_set_key,
1409 .encrypt = ablk_encrypt,
1410 .decrypt = ablk_encrypt,
1411 .geniv = "chainiv",
1412 },
1413 },
1414}, {
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001415 .cra_name = "__gcm-aes-aesni",
1416 .cra_driver_name = "__driver-gcm-aes-aesni",
1417 .cra_priority = 0,
Stephan Muellereabdc322015-03-30 21:58:17 +02001418 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_INTERNAL,
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001419 .cra_blocksize = 1,
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001420 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1421 AESNI_ALIGN,
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001422 .cra_alignmask = 0,
1423 .cra_type = &crypto_aead_type,
1424 .cra_module = THIS_MODULE,
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001425 .cra_u = {
1426 .aead = {
Tadeusz Struk81e397d2015-02-06 10:25:20 -08001427 .setkey = common_rfc4106_set_key,
1428 .setauthsize = common_rfc4106_set_authsize,
1429 .encrypt = helper_rfc4106_encrypt,
1430 .decrypt = helper_rfc4106_decrypt,
1431 .ivsize = 8,
1432 .maxauthsize = 16,
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001433 },
1434 },
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001435}, {
1436 .cra_name = "rfc4106(gcm(aes))",
1437 .cra_driver_name = "rfc4106-gcm-aesni",
1438 .cra_priority = 400,
1439 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1440 .cra_blocksize = 1,
1441 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1442 AESNI_ALIGN,
1443 .cra_alignmask = 0,
1444 .cra_type = &crypto_nivaead_type,
1445 .cra_module = THIS_MODULE,
1446 .cra_init = rfc4106_init,
1447 .cra_exit = rfc4106_exit,
1448 .cra_u = {
1449 .aead = {
1450 .setkey = rfc4106_set_key,
1451 .setauthsize = rfc4106_set_authsize,
1452 .encrypt = rfc4106_encrypt,
1453 .decrypt = rfc4106_decrypt,
1454 .geniv = "seqiv",
1455 .ivsize = 8,
1456 .maxauthsize = 16,
1457 },
1458 },
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001459#endif
Valentin Rothberg304576a2014-10-21 11:35:32 +02001460#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001461}, {
1462 .cra_name = "pcbc(aes)",
1463 .cra_driver_name = "pcbc-aes-aesni",
1464 .cra_priority = 400,
1465 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1466 .cra_blocksize = AES_BLOCK_SIZE,
Jussi Kivilinnaa9629d72012-06-18 14:07:08 +03001467 .cra_ctxsize = sizeof(struct async_helper_ctx),
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001468 .cra_alignmask = 0,
1469 .cra_type = &crypto_ablkcipher_type,
1470 .cra_module = THIS_MODULE,
1471 .cra_init = ablk_pcbc_init,
1472 .cra_exit = ablk_exit,
1473 .cra_u = {
1474 .ablkcipher = {
1475 .min_keysize = AES_MIN_KEY_SIZE,
1476 .max_keysize = AES_MAX_KEY_SIZE,
1477 .ivsize = AES_BLOCK_SIZE,
1478 .setkey = ablk_set_key,
1479 .encrypt = ablk_encrypt,
1480 .decrypt = ablk_decrypt,
1481 },
1482 },
1483#endif
Jussi Kivilinna023af602012-07-22 18:18:37 +03001484}, {
1485 .cra_name = "__lrw-aes-aesni",
1486 .cra_driver_name = "__driver-lrw-aes-aesni",
1487 .cra_priority = 0,
Stephan Muellereabdc322015-03-30 21:58:17 +02001488 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
1489 CRYPTO_ALG_INTERNAL,
Jussi Kivilinna023af602012-07-22 18:18:37 +03001490 .cra_blocksize = AES_BLOCK_SIZE,
1491 .cra_ctxsize = sizeof(struct aesni_lrw_ctx),
1492 .cra_alignmask = 0,
1493 .cra_type = &crypto_blkcipher_type,
1494 .cra_module = THIS_MODULE,
1495 .cra_exit = lrw_aesni_exit_tfm,
1496 .cra_u = {
1497 .blkcipher = {
1498 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1499 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1500 .ivsize = AES_BLOCK_SIZE,
1501 .setkey = lrw_aesni_setkey,
1502 .encrypt = lrw_encrypt,
1503 .decrypt = lrw_decrypt,
1504 },
1505 },
1506}, {
1507 .cra_name = "__xts-aes-aesni",
1508 .cra_driver_name = "__driver-xts-aes-aesni",
1509 .cra_priority = 0,
Stephan Muellereabdc322015-03-30 21:58:17 +02001510 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
1511 CRYPTO_ALG_INTERNAL,
Jussi Kivilinna023af602012-07-22 18:18:37 +03001512 .cra_blocksize = AES_BLOCK_SIZE,
1513 .cra_ctxsize = sizeof(struct aesni_xts_ctx),
1514 .cra_alignmask = 0,
1515 .cra_type = &crypto_blkcipher_type,
1516 .cra_module = THIS_MODULE,
1517 .cra_u = {
1518 .blkcipher = {
1519 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1520 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1521 .ivsize = AES_BLOCK_SIZE,
1522 .setkey = xts_aesni_setkey,
1523 .encrypt = xts_encrypt,
1524 .decrypt = xts_decrypt,
1525 },
1526 },
1527}, {
1528 .cra_name = "lrw(aes)",
1529 .cra_driver_name = "lrw-aes-aesni",
1530 .cra_priority = 400,
1531 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1532 .cra_blocksize = AES_BLOCK_SIZE,
1533 .cra_ctxsize = sizeof(struct async_helper_ctx),
1534 .cra_alignmask = 0,
1535 .cra_type = &crypto_ablkcipher_type,
1536 .cra_module = THIS_MODULE,
1537 .cra_init = ablk_init,
1538 .cra_exit = ablk_exit,
1539 .cra_u = {
1540 .ablkcipher = {
1541 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1542 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1543 .ivsize = AES_BLOCK_SIZE,
1544 .setkey = ablk_set_key,
1545 .encrypt = ablk_encrypt,
1546 .decrypt = ablk_decrypt,
1547 },
1548 },
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001549}, {
1550 .cra_name = "xts(aes)",
1551 .cra_driver_name = "xts-aes-aesni",
1552 .cra_priority = 400,
1553 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1554 .cra_blocksize = AES_BLOCK_SIZE,
Jussi Kivilinnaa9629d72012-06-18 14:07:08 +03001555 .cra_ctxsize = sizeof(struct async_helper_ctx),
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001556 .cra_alignmask = 0,
1557 .cra_type = &crypto_ablkcipher_type,
1558 .cra_module = THIS_MODULE,
Jussi Kivilinna023af602012-07-22 18:18:37 +03001559 .cra_init = ablk_init,
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001560 .cra_exit = ablk_exit,
1561 .cra_u = {
1562 .ablkcipher = {
1563 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1564 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1565 .ivsize = AES_BLOCK_SIZE,
1566 .setkey = ablk_set_key,
1567 .encrypt = ablk_encrypt,
1568 .decrypt = ablk_decrypt,
1569 },
1570 },
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001571} };
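
/*
 * Usage note (illustrative, not part of the original file): kernel users
 * do not call the functions above directly.  They allocate a transform
 * by name, e.g. crypto_alloc_ablkcipher("cbc(aes)", 0, 0), and the
 * priority of 400 makes the crypto API prefer these AES-NI
 * implementations over the generic C ones.  The CRYPTO_ALG_INTERNAL
 * "__driver-*" variants are only reachable through cryptd and the
 * ablk_helper wrappers, which defer work to a kernel thread when the FPU
 * is not usable in the calling context.
 */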
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001572
Andi Kleen3bd391f2012-01-26 00:09:06 +01001573
1574static const struct x86_cpu_id aesni_cpu_id[] = {
1575 X86_FEATURE_MATCH(X86_FEATURE_AES),
1576 {}
1577};
1578MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1579
Huang Ying54b6a1b2009-01-18 16:28:34 +11001580static int __init aesni_init(void)
1581{
Jussi Kivilinna7af6c242012-07-11 14:20:51 +03001582 int err;
Huang Ying54b6a1b2009-01-18 16:28:34 +11001583
Andi Kleen3bd391f2012-01-26 00:09:06 +01001584 if (!x86_match_cpu(aesni_cpu_id))
Huang Ying54b6a1b2009-01-18 16:28:34 +11001585 return -ENODEV;
Andy Shevchenko8610d7b2013-12-30 15:52:24 +02001586#ifdef CONFIG_X86_64
Tim Chend7645932013-12-11 14:28:41 -08001587#ifdef CONFIG_AS_AVX2
1588 if (boot_cpu_has(X86_FEATURE_AVX2)) {
1589 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1590 aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1591 aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1592 } else
1593#endif
1594#ifdef CONFIG_AS_AVX
1595 if (boot_cpu_has(X86_FEATURE_AVX)) {
1596 pr_info("AVX version of gcm_enc/dec engaged.\n");
1597 aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1598 aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1599 } else
1600#endif
1601 {
1602 pr_info("SSE version of gcm_enc/dec engaged.\n");
1603 aesni_gcm_enc_tfm = aesni_gcm_enc;
1604 aesni_gcm_dec_tfm = aesni_gcm_dec;
1605 }
chandramouli narayanan22cddcc2014-06-10 09:22:47 -07001606 aesni_ctr_enc_tfm = aesni_ctr_enc;
Mathias Krause5cfed7b2014-09-28 22:24:01 +02001607#ifdef CONFIG_AS_AVX
chandramouli narayanan22cddcc2014-06-10 09:22:47 -07001608 if (cpu_has_avx) {
1609 /* optimize performance of ctr mode encryption transform */
1610 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1611 pr_info("AES CTR mode by8 optimization enabled\n");
1612 }
1613#endif
Andy Shevchenko8610d7b2013-12-30 15:52:24 +02001614#endif
Tadeusz Struk0bd82f52010-11-04 15:00:45 -04001615
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001616 err = crypto_fpu_init();
1617 if (err)
1618 return err;
Huang Ying54b6a1b2009-01-18 16:28:34 +11001619
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001620 return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
Huang Ying54b6a1b2009-01-18 16:28:34 +11001621}
1622
1623static void __exit aesni_exit(void)
1624{
Jussi Kivilinnafa46ccb2012-05-11 16:00:48 +03001625 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
Andy Lutomirskib23b6452011-05-16 15:12:47 +10001626
1627 crypto_fpu_exit();
Huang Ying54b6a1b2009-01-18 16:28:34 +11001628}
1629
1630module_init(aesni_init);
1631module_exit(aesni_exit);
1632
1633MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1634MODULE_LICENSE("GPL");
Kees Cook5d26a102014-11-20 17:05:53 -08001635MODULE_ALIAS_CRYPTO("aes");