/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif

Huang Ying54b6a1b2009-01-18 16:28:34 +110054struct async_aes_ctx {
55 struct cryptd_ablkcipher *cryptd_tfm;
56};
57
Tadeusz Struk0bd82f52010-11-04 15:00:45 -040058/* This data is stored at the end of the crypto_tfm struct.
59 * It's a type of per "session" data storage location.
60 * This needs to be 16 byte aligned.
61 */
62struct aesni_rfc4106_gcm_ctx {
63 u8 hash_subkey[16];
64 struct crypto_aes_ctx aes_key_expanded;
65 u8 nonce[4];
66 struct cryptd_aead *cryptd_tfm;
67};
68
69struct aesni_gcm_set_hash_subkey_result {
70 int err;
71 struct completion completion;
72};
73
74struct aesni_hash_subkey_req_data {
75 u8 iv[16];
76 struct aesni_gcm_set_hash_subkey_result result;
77 struct scatterlist sg;
78};
79
80#define AESNI_ALIGN (16)
Huang Ying54b6a1b2009-01-18 16:28:34 +110081#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
Tadeusz Struk0bd82f52010-11-04 15:00:45 -040082#define RFC4106_HASH_SUBKEY_SIZE 16
Huang Ying54b6a1b2009-01-18 16:28:34 +110083
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
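/*
 * An illustrative call, sketched from the way __driver_rfc4106_encrypt()
 * below drives this routine (buffer setup omitted):
 *
 *	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
 *		      ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
 *		      dst + req->cryptlen, auth_tag_len);
 *
 * i.e. the authentication tag is written immediately behind the ciphertext.
 */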

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
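/*
 * Note the asymmetry with aesni_gcm_enc(): on decryption the expected tag is
 * not checked here. The tag is computed into auth_tag and the caller
 * (__driver_rfc4106_decrypt() below) compares it against the tag received
 * at the end of the ciphertext.
 */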

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
			  AESNI_ALIGN);
}
#endif

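/*
 * The raw tfm context is only guaranteed to be aligned to
 * crypto_tfm_ctx_alignment(); the AES-NI key schedule must sit on a 16-byte
 * boundary, so cra_ctxsize is padded by AESNI_ALIGN - 1 and aes_ctx() rounds
 * the pointer up when the default alignment is not already sufficient.
 */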
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

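/*
 * Common pattern for the synchronous entry points below: the AES-NI
 * instructions use SSE registers, so they may only run between
 * kernel_fpu_begin()/kernel_fpu_end(). When the FPU cannot be used in the
 * current context (irq_fpu_usable() is false), fall back to the generic
 * x86 AES implementation instead.
 */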
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static struct crypto_alg aesni_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
};

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static struct crypto_alg __aesni_alg = {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(__aesni_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
};

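/*
 * The blkcipher helpers below walk the scatterlists and hand whole AES
 * blocks to the assembler routines. CRYPTO_TFM_REQ_MAY_SLEEP is cleared
 * because the walk is completed inside a kernel_fpu_begin()/end() section,
 * where sleeping is not allowed.
 */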
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ecb_alg = {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
};

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_cbc_alg = {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
};

#ifdef CONFIG_X86_64
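/*
 * ctr_crypt_final() handles a trailing partial block: it encrypts the
 * current counter block into a keystream buffer, XORs only the remaining
 * nbytes into the destination, and then bumps the counter.
 */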
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ctr_alg = {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
};
#endif

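/*
 * The ablk_* wrappers expose the synchronous helpers above as asynchronous
 * ablkcipher algorithms. If the FPU is usable they run the underlying
 * blkcipher directly; otherwise the request is forwarded to cryptd so the
 * AES-NI work is done later in process context.
 */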
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static void ablk_init_common(struct crypto_tfm *tfm,
			     struct cryptd_ablkcipher *cryptd_tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ecb_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ctr_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
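			/* CTR decryption is the same operation as encryption */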
			.geniv		= "chainiv",
		},
	},
};

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(
		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_rfc3686_ctr_alg = {
	.cra_name		= "rfc3686(ctr(aes))",
	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
	.cra_init		= ablk_rfc3686_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.ivsize	     = CTR_RFC3686_IV_SIZE,
			.setkey	     = ablk_set_key,
			.encrypt     = ablk_encrypt,
			.decrypt     = ablk_decrypt,
			.geniv	     = "seqiv",
		},
	},
};
#endif
#endif

#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_lrw_alg = {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
	.cra_init		= ablk_lrw_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_pcbc_alg = {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_xts_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list),
	.cra_init		= ablk_xts_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
#endif

#ifdef CONFIG_X86_64
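/*
 * RFC4106 AES-GCM. The user-visible "rfc4106(gcm(aes))" algorithm is backed
 * by a cryptd instance of __driver-gcm-aes-aesni: when the FPU is usable the
 * request is handled inline, otherwise it is deferred to cryptd.
 */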
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

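/*
 * Derive the GHASH hash subkey. Running a zero block through "ctr(aes)"
 * with an all-zero IV in effect computes H = E_K(0^128), which is GCM's
 * hash subkey.
 */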
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero. */
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* key is not aligned: use an auxiliary aligned pointer */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_mem, key, key_len);
		key = new_key_mem;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static struct crypto_alg rfc4106_alg = {
	.cra_name = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_nivaead_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
	.cra_init = rfc4106_init,
	.cra_exit = rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey = rfc4106_set_key,
			.setauthsize = rfc4106_set_authsize,
			.encrypt = rfc4106_encrypt,
			.decrypt = rfc4106_decrypt,
			.geniv = "seqiv",
			.ivsize = 8,
			.maxauthsize = 16,
		},
	},
};

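/*
 * The pre-counter block j0 handed to the assembler is built exactly as the
 * aesni_gcm_enc()/dec() headers above describe: the 4 byte nonce taken from
 * the key, then the 8 byte per-request IV, then a big-endian 0x00000001.
 */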
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be either
	 * 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* Build the IV */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be either
	 * 8 or 12 bytes. */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* Build the IV */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
			req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}

static struct crypto_alg __rfc4106_alg = {
	.cra_name = "__gcm-aes-aesni",
	.cra_driver_name = "__driver-gcm-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
	.cra_u = {
		.aead = {
			.encrypt = __driver_rfc4106_encrypt,
			.decrypt = __driver_rfc4106_decrypt,
		},
	},
};
#endif

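/*
 * Module init: require the AES-NI CPU feature, then register the algorithms
 * in dependency order. Each error label unwinds exactly the registrations
 * that succeeded before the failure.
 */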
static int __init aesni_init(void)
{
	int err;

	if (!cpu_has_aes) {
		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
		return -ENODEV;
	}

	if ((err = crypto_fpu_init()))
		goto fpu_err;
	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
	if ((err = crypto_register_alg(&__aesni_alg)))
		goto __aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
		goto blk_cbc_err;
	if ((err = crypto_register_alg(&ablk_ecb_alg)))
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;
#ifdef CONFIG_X86_64
	if ((err = crypto_register_alg(&blk_ctr_alg)))
		goto blk_ctr_err;
	if ((err = crypto_register_alg(&ablk_ctr_alg)))
		goto ablk_ctr_err;
	if ((err = crypto_register_alg(&__rfc4106_alg)))
		goto __aead_gcm_err;
	if ((err = crypto_register_alg(&rfc4106_alg)))
		goto aead_gcm_err;
#ifdef HAS_CTR
	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
		goto ablk_rfc3686_ctr_err;
#endif
#endif
#ifdef HAS_LRW
	if ((err = crypto_register_alg(&ablk_lrw_alg)))
		goto ablk_lrw_err;
#endif
#ifdef HAS_PCBC
	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
		goto ablk_pcbc_err;
#endif
#ifdef HAS_XTS
	if ((err = crypto_register_alg(&ablk_xts_alg)))
		goto ablk_xts_err;
#endif
	return err;

#ifdef HAS_XTS
ablk_xts_err:
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
#endif
	crypto_unregister_alg(&rfc4106_alg);
aead_gcm_err:
	crypto_unregister_alg(&__rfc4106_alg);
__aead_gcm_err:
	crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
	crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
	crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
fpu_err:
	return err;
}

static void __exit aesni_exit(void)
{
#ifdef HAS_XTS
	crypto_unregister_alg(&ablk_xts_alg);
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
#endif
	crypto_unregister_alg(&rfc4106_alg);
	crypto_unregister_alg(&__rfc4106_alg);
	crypto_unregister_alg(&ablk_ctr_alg);
	crypto_unregister_alg(&blk_ctr_alg);
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);

	crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");