/*
 * aes-ce-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include <crypto/xts.h>

MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

/* defined in aes-ce-core.S */
asmlinkage u32 ce_aes_sub(u32 input);
asmlinkage void ce_aes_invert(void *dst, void *src);
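
/*
 * ce_aes_sub() applies the AES SubBytes substitution to each byte of a
 * 32-bit word, and ce_aes_invert() applies the Inverse MixColumns
 * transform to a single round key; both serve the key schedule code in
 * ce_aes_expandkey() below.
 */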

asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks);
asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks);

asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks, u8 iv[]);

asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks, u8 ctr[]);

asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
				   int rounds, int blocks, u8 iv[],
				   u8 const rk2[], int first);
asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
				   int rounds, int blocks, u8 iv[],
				   u8 const rk2[], int first);

struct aes_block {
	u8 b[AES_BLOCK_SIZE];
};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
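	 * e.g. a 32 byte (256 bit) key	=> 6 + 32/4 = 14 rounds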
	 */
	return 6 + ctx->key_length / 4;
}

static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
			    unsigned int key_len)
{
	/*
	 * The AES key schedule round constants
	 */
	static u8 const rcon[] = {
		0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
	};

	u32 kwords = key_len / sizeof(u32);
	struct aes_block *key_enc, *key_dec;
	int i, j;

	if (key_len != AES_KEYSIZE_128 &&
	    key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key_enc, in_key, key_len);
	ctx->key_length = key_len;

	kernel_neon_begin();
	for (i = 0; i < sizeof(rcon); i++) {
		u32 *rki = ctx->key_enc + (i * kwords);
		u32 *rko = rki + kwords;

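		/*
		 * Key schedule core: rko[0] = SubWord(RotWord(prev)) ^ rcon.
		 * On big-endian builds the bytes within each 32-bit word
		 * are in the opposite order, so the rotation direction and
		 * the position of the rcon byte are mirrored below.
		 */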
#ifndef CONFIG_CPU_BIG_ENDIAN
		rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
		rko[0] = rko[0] ^ rki[0] ^ rcon[i];
#else
		rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
		rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
#endif
		rko[1] = rko[0] ^ rki[1];
		rko[2] = rko[1] ^ rki[2];
		rko[3] = rko[2] ^ rki[3];

		if (key_len == AES_KEYSIZE_192) {
			if (i >= 7)
				break;
			rko[4] = rko[3] ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
		} else if (key_len == AES_KEYSIZE_256) {
			if (i >= 6)
				break;
			rko[4] = ce_aes_sub(rko[3]) ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
			rko[6] = rko[5] ^ rki[6];
			rko[7] = rko[6] ^ rki[7];
		}
	}

	/*
	 * Generate the decryption keys for the Equivalent Inverse Cipher.
	 * This involves reversing the order of the round keys, and applying
	 * the Inverse Mix Columns transformation on all but the first and
	 * the last one.
	 */
	key_enc = (struct aes_block *)ctx->key_enc;
	key_dec = (struct aes_block *)ctx->key_dec;
	j = num_rounds(ctx);

	key_dec[0] = key_enc[j];
	for (i = 1, j--; j > 0; i++, j--)
		ce_aes_invert(key_dec + i, key_enc + j);
	key_dec[i] = key_enc[0];

	kernel_neon_end();
	return 0;
}

static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = ce_aes_expandkey(ctx, in_key, key_len);
	if (!ret)
		return 0;

	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, in_key, key_len);
	if (ret)
		return ret;

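	/* an XTS key is two equal-size AES keys, concatenated */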
	ret = ce_aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				       key_len / 2);
	if (!ret)
		return 0;

	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

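	/*
	 * skcipher_walk_virt() exposes the src/dst scatterlists as
	 * virtually mapped chunks; each pass below handles the whole
	 * blocks in the current chunk and reports the remainder back
	 * via skcipher_walk_done().
	 */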
	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
				   walk.iv);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks,
				   walk.iv);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err, blocks;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
				   walk.iv);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	if (walk.nbytes) {
		u8 __aligned(8) tail[AES_BLOCK_SIZE];
		unsigned int nbytes = walk.nbytes;
		u8 *tdst = walk.dst.virt.addr;
		u8 *tsrc = walk.src.virt.addr;

		/*
		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we
		 * need to tell ce_aes_ctr_encrypt() to only read half a
		 * block.
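		 * CTR is a stream cipher, so the final partial block is
		 * encrypted into the aligned 'tail' buffer and only the
		 * remaining nbytes are copied back to the destination.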
		 */
		blocks = (nbytes <= 8) ? -1 : 1;

		ce_aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc,
				   num_rounds(ctx), blocks, walk.iv);
		memcpy(tdst, tail, nbytes);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_neon_end();

	return err;
}

static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, true);

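	/*
	 * On the first call the asm routine encrypts the IV with key2 to
	 * derive the initial XTS tweak; 'first' is cleared afterwards so
	 * subsequent chunks reuse the tweak carried in walk.iv.
	 */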
	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key1.key_enc, rounds, blocks,
				   walk.iv, (u8 *)ctx->key2.key_enc, first);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key1.key_dec, rounds, blocks,
				   walk.iv, (u8 *)ctx->key2.key_enc, first);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

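/*
 * The "__" prefix plus CRYPTO_ALG_INTERNAL keeps these NEON-only
 * variants from being instantiated directly; aes_init() below wraps
 * each one in a SIMD helper that defers to an asynchronous cryptd
 * queue whenever NEON is unusable in the calling context.
 */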
static struct skcipher_alg aes_algs[] = { {
	.base = {
		.cra_name		= "__ecb(aes)",
		.cra_driver_name	= "__ecb-aes-ce",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_alignmask		= 7,
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.setkey		= ce_aes_setkey,
	.encrypt	= ecb_encrypt,
	.decrypt	= ecb_decrypt,
}, {
	.base = {
		.cra_name		= "__cbc(aes)",
		.cra_driver_name	= "__cbc-aes-ce",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_alignmask		= 7,
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= ce_aes_setkey,
	.encrypt	= cbc_encrypt,
	.decrypt	= cbc_decrypt,
}, {
	.base = {
		.cra_name		= "__ctr(aes)",
		.cra_driver_name	= "__ctr-aes-ce",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_alignmask		= 7,
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.setkey		= ce_aes_setkey,
	.encrypt	= ctr_encrypt,
	.decrypt	= ctr_encrypt,
}, {
	.base = {
		.cra_name		= "__xts(aes)",
		.cra_driver_name	= "__xts-aes-ce",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
		.cra_alignmask		= 7,
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= xts_set_key,
	.encrypt	= xts_encrypt,
	.decrypt	= xts_decrypt,
} };
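
/*
 * Once aes_init() has registered the SIMD wrappers, these ciphers are
 * reached through the ordinary skcipher API under their unprefixed
 * names. A minimal sketch, with error handling elided and a
 * caller-supplied 'key' buffer assumed:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...
 *	crypto_free_skcipher(tfm);
 */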

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
		simd_skcipher_free(aes_simd_algs[i]);

	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	if (!(elf_hwcap2 & HWCAP2_AES))
		return -ENODEV;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
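		/* strip the "__" prefix to recover the public algorithm name */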
		algname = aes_algs[i].base.cra_name + 2;
		drvname = aes_algs[i].base.cra_driver_name + 2;
		basename = aes_algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aes_simd_algs[i] = simd;
	}

	return 0;

unregister_simds:
	aes_exit();
	return err;
}

module_init(aes_init);
module_exit(aes_exit);