/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

/* This data is stored at the end of the crypto_tfm struct.
 * It is used as per-"session" data storage and must be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

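/*
 * Low-level AES-NI primitives implemented in the accompanying assembly file.
 * They use SSE/XMM state, so callers must bracket them with
 * kernel_fpu_begin()/kernel_fpu_end() unless the FPU is already usable.
 */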
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *          is going to be 8 or 12 bytes
 * u8 *auth_tag, Authentication Tag output.
 * unsigned long auth_tag_len, Authentication Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *          to be 8 or 12 bytes
 * u8 *auth_tag, Authentication Tag output.
 * unsigned long auth_tag_len, Authentication Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
			  AESNI_ALIGN);
}
#endif

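/*
 * Return the 16-byte aligned AES context embedded in an over-allocated raw
 * context buffer.  The crypto API only guarantees crypto_tfm_ctx_alignment(),
 * which may be smaller than AESNI_ALIGN.
 */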
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

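/*
 * Single-block helpers backing the internal "__aes-aesni" cipher.  Unlike
 * aes_encrypt()/aes_decrypt() above they assume the FPU is already usable
 * (e.g. inside the fpu() template), so there is no irq_fpu_usable() fallback.
 */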
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

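/*
 * The ECB/CBC/CTR helpers below walk the scatterlists in virtual address
 * space and hand whole AES blocks to the assembly routines while the FPU is
 * held.  CRYPTO_TFM_REQ_MAY_SLEEP is cleared because sleeping is not allowed
 * between kernel_fpu_begin() and kernel_fpu_end().
 */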
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
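/*
 * Handle the final partial CTR block: encrypt the current counter block to
 * produce a keystream block and XOR only the remaining bytes into place.
 */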
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

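/*
 * LRW and XTS use the bulk ECB assembly routines as their block-processing
 * callbacks; the generic lrw_crypt()/xts_crypt() helpers take care of the
 * tweak handling.
 */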
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}

static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}


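/* Adapter so xts_crypt() can use the AES-NI single-block encrypt for tweaks. */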
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#ifdef CONFIG_X86_64
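/*
 * RFC4106 AES-GCM.  The user-visible "rfc4106(gcm(aes))" algorithm wraps an
 * internal "__driver-gcm-aes-aesni" instance through cryptd, so requests that
 * arrive while the FPU is unusable can be deferred to a kernel thread instead
 * of being processed synchronously.
 */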
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

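/*
 * Derive the GHASH hash subkey H = E_K(0^128) by encrypting an all-zero
 * block with an all-zero counter through a separately allocated ctr(aes)
 * transform, waiting for completion if that cipher runs asynchronously.
 */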
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* key is not aligned: use an auxiliary aligned pointer */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

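/*
 * The __driver_rfc4106_* routines feed linear buffers to the GCM assembly.
 * When src and assoc each consist of a single scatterlist entry they are
 * mapped and processed in place; otherwise the data is copied into a
 * temporary contiguous buffer (GFP_ATOMIC, since sleeping is not allowed
 * here) and copied back afterwards.
 */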
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be 8 or 12 bytes.
	 */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* Build the IV: 4 byte nonce, 8 byte per-request IV, 4 byte counter. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
					 req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be 8 or 12 bytes.
	 */
	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* Build the IV: 4 byte nonce, 8 byte per-request IV, 4 byte counter. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}
#endif

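/*
 * Algorithm registrations.  Entries with a "__" prefix and cra_priority 0 are
 * internal helpers that user code is not meant to select directly; they are
 * reached through cryptd and the ablk/fpu wrappers.  The async wrappers that
 * users see are registered at cra_priority 400.
 */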
static struct crypto_alg aesni_algs[] = { {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt
		}
	}
}, {
	.cra_name = "__aes-aesni",
	.cra_driver_name = "__driver-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = __aes_encrypt,
			.cia_decrypt = __aes_decrypt
		}
	}
}, {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name = "__ctr-aes-aesni",
	.cra_driver_name = "__driver-ctr-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = aes_set_key,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
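			/* CTR decryption is the same keystream operation as encryption. */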
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
}, {
	.cra_name = "__gcm-aes-aesni",
	.cra_driver_name = "__driver-gcm-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
		       AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.aead = {
			.encrypt = __driver_rfc4106_encrypt,
			.decrypt = __driver_rfc4106_decrypt,
		},
	},
}, {
	.cra_name = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
		       AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_nivaead_type,
	.cra_module = THIS_MODULE,
	.cra_init = rfc4106_init,
	.cra_exit = rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey = rfc4106_set_key,
			.setauthsize = rfc4106_set_authsize,
			.encrypt = rfc4106_encrypt,
			.decrypt = rfc4106_decrypt,
			.geniv = "seqiv",
			.ivsize = 8,
			.maxauthsize = 16,
		},
	},
#endif
#ifdef HAS_PCBC
}, {
	.cra_name = "pcbc(aes)",
	.cra_driver_name = "pcbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_pcbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#endif
}, {
	.cra_name = "__lrw-aes-aesni",
	.cra_driver_name = "__driver-lrw-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aesni_lrw_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_exit = lrw_aesni_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = lrw_aesni_setkey,
			.encrypt = lrw_encrypt,
			.decrypt = lrw_decrypt,
		},
	},
}, {
	.cra_name = "__xts-aes-aesni",
	.cra_driver_name = "__driver-xts-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aesni_xts_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = xts_aesni_setkey,
			.encrypt = xts_encrypt,
			.decrypt = xts_decrypt,
		},
	},
}, {
	.cra_name = "lrw(aes)",
	.cra_driver_name = "lrw-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
} };
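/* Load this module only on CPUs that advertise the AES-NI feature flag. */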
static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;

	err = crypto_fpu_init();
	if (err)
		return err;

	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

static void __exit aesni_exit(void)
{
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");