/* Glue code for AES encryption optimized for sparc64 crypto opcodes.
 *
 * This is based largely upon arch/x86/crypto/aesni-intel_glue.c
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <asm/fpumacro.h>
#include <asm/pstate.h>
#include <asm/elf.h>

#include "opcodes.h"

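/*
 * Per-key-size operation vector.  Each member points at a hand-coded
 * sparc64 assembly routine (see the accompanying aes_asm.S), selected
 * once at setkey time so the fast paths below never re-check the key
 * length.
 */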
struct aes_ops {
	void (*encrypt)(const u64 *key, const u32 *input, u32 *output);
	void (*decrypt)(const u64 *key, const u32 *input, u32 *output);
	void (*load_encrypt_keys)(const u64 *key);
	void (*load_decrypt_keys)(const u64 *key);
	void (*ecb_encrypt)(const u64 *key, const u64 *input, u64 *output,
			    unsigned int len);
	void (*ecb_decrypt)(const u64 *key, const u64 *input, u64 *output,
			    unsigned int len);
	void (*cbc_encrypt)(const u64 *key, const u64 *input, u64 *output,
			    unsigned int len, u64 *iv);
	void (*cbc_decrypt)(const u64 *key, const u64 *input, u64 *output,
			    unsigned int len, u64 *iv);
	void (*ctr_crypt)(const u64 *key, const u64 *input, u64 *output,
			  unsigned int len, u64 *iv);
};

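/* Per-tfm context: the ops vector chosen at setkey time plus the
 * expanded key schedule and its lengths.
 */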
struct crypto_sparc64_aes_ctx {
	struct aes_ops *ops;
	u64 key[AES_MAX_KEYLENGTH / sizeof(u64)];
	u32 key_length;
	u32 expanded_key_length;
};

extern void aes_sparc64_encrypt_128(const u64 *key, const u32 *input,
				    u32 *output);
extern void aes_sparc64_encrypt_192(const u64 *key, const u32 *input,
				    u32 *output);
extern void aes_sparc64_encrypt_256(const u64 *key, const u32 *input,
				    u32 *output);

extern void aes_sparc64_decrypt_128(const u64 *key, const u32 *input,
				    u32 *output);
extern void aes_sparc64_decrypt_192(const u64 *key, const u32 *input,
				    u32 *output);
extern void aes_sparc64_decrypt_256(const u64 *key, const u32 *input,
				    u32 *output);

extern void aes_sparc64_load_encrypt_keys_128(const u64 *key);
extern void aes_sparc64_load_encrypt_keys_192(const u64 *key);
extern void aes_sparc64_load_encrypt_keys_256(const u64 *key);

extern void aes_sparc64_load_decrypt_keys_128(const u64 *key);
extern void aes_sparc64_load_decrypt_keys_192(const u64 *key);
extern void aes_sparc64_load_decrypt_keys_256(const u64 *key);

extern void aes_sparc64_ecb_encrypt_128(const u64 *key, const u64 *input,
					u64 *output, unsigned int len);
extern void aes_sparc64_ecb_encrypt_192(const u64 *key, const u64 *input,
					u64 *output, unsigned int len);
extern void aes_sparc64_ecb_encrypt_256(const u64 *key, const u64 *input,
					u64 *output, unsigned int len);

extern void aes_sparc64_ecb_decrypt_128(const u64 *key, const u64 *input,
					u64 *output, unsigned int len);
extern void aes_sparc64_ecb_decrypt_192(const u64 *key, const u64 *input,
					u64 *output, unsigned int len);
extern void aes_sparc64_ecb_decrypt_256(const u64 *key, const u64 *input,
					u64 *output, unsigned int len);

extern void aes_sparc64_cbc_encrypt_128(const u64 *key, const u64 *input,
					u64 *output, unsigned int len,
					u64 *iv);

extern void aes_sparc64_cbc_encrypt_192(const u64 *key, const u64 *input,
					u64 *output, unsigned int len,
					u64 *iv);

extern void aes_sparc64_cbc_encrypt_256(const u64 *key, const u64 *input,
					u64 *output, unsigned int len,
					u64 *iv);

extern void aes_sparc64_cbc_decrypt_128(const u64 *key, const u64 *input,
					u64 *output, unsigned int len,
					u64 *iv);

extern void aes_sparc64_cbc_decrypt_192(const u64 *key, const u64 *input,
					u64 *output, unsigned int len,
					u64 *iv);

extern void aes_sparc64_cbc_decrypt_256(const u64 *key, const u64 *input,
					u64 *output, unsigned int len,
					u64 *iv);

extern void aes_sparc64_ctr_crypt_128(const u64 *key, const u64 *input,
				      u64 *output, unsigned int len,
				      u64 *iv);
extern void aes_sparc64_ctr_crypt_192(const u64 *key, const u64 *input,
				      u64 *output, unsigned int len,
				      u64 *iv);
extern void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input,
				      u64 *output, unsigned int len,
				      u64 *iv);

static struct aes_ops aes128_ops = {
	.encrypt		= aes_sparc64_encrypt_128,
	.decrypt		= aes_sparc64_decrypt_128,
	.load_encrypt_keys	= aes_sparc64_load_encrypt_keys_128,
	.load_decrypt_keys	= aes_sparc64_load_decrypt_keys_128,
	.ecb_encrypt		= aes_sparc64_ecb_encrypt_128,
	.ecb_decrypt		= aes_sparc64_ecb_decrypt_128,
	.cbc_encrypt		= aes_sparc64_cbc_encrypt_128,
	.cbc_decrypt		= aes_sparc64_cbc_decrypt_128,
	.ctr_crypt		= aes_sparc64_ctr_crypt_128,
};

static struct aes_ops aes192_ops = {
	.encrypt		= aes_sparc64_encrypt_192,
	.decrypt		= aes_sparc64_decrypt_192,
	.load_encrypt_keys	= aes_sparc64_load_encrypt_keys_192,
	.load_decrypt_keys	= aes_sparc64_load_decrypt_keys_192,
	.ecb_encrypt		= aes_sparc64_ecb_encrypt_192,
	.ecb_decrypt		= aes_sparc64_ecb_decrypt_192,
	.cbc_encrypt		= aes_sparc64_cbc_encrypt_192,
	.cbc_decrypt		= aes_sparc64_cbc_decrypt_192,
	.ctr_crypt		= aes_sparc64_ctr_crypt_192,
};

static struct aes_ops aes256_ops = {
	.encrypt		= aes_sparc64_encrypt_256,
	.decrypt		= aes_sparc64_decrypt_256,
	.load_encrypt_keys	= aes_sparc64_load_encrypt_keys_256,
	.load_decrypt_keys	= aes_sparc64_load_decrypt_keys_256,
	.ecb_encrypt		= aes_sparc64_ecb_encrypt_256,
	.ecb_decrypt		= aes_sparc64_ecb_decrypt_256,
	.cbc_encrypt		= aes_sparc64_cbc_encrypt_256,
	.cbc_decrypt		= aes_sparc64_cbc_decrypt_256,
	.ctr_crypt		= aes_sparc64_ctr_crypt_256,
};

extern void aes_sparc64_key_expand(const u32 *in_key, u64 *output_key,
				   unsigned int key_len);

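/*
 * The expanded schedule is (rounds + 1) 16-byte round keys, hence the
 * lengths below: 0xb0 (176) bytes for AES-128, 0xd0 (208) for AES-192
 * and 0xf0 (240) for AES-256.
 */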
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->expanded_key_length = 0xb0;
		ctx->ops = &aes128_ops;
		break;

	case AES_KEYSIZE_192:
		ctx->expanded_key_length = 0xd0;
		ctx->ops = &aes192_ops;
		break;

	case AES_KEYSIZE_256:
		ctx->expanded_key_length = 0xf0;
		ctx->ops = &aes256_ops;
		break;

	default:
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	aes_sparc64_key_expand((const u32 *)in_key, &ctx->key[0], key_len);
	ctx->key_length = key_len;

	return 0;
}

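/* Single-block entry points backing the bare "aes" cipher below. */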
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops->encrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops->decrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst);
}

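/* Round a byte count down to a whole number of AES blocks. */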
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))

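/*
 * The blkcipher mode helpers below share one pattern: load the round
 * keys into the FPU once, process each scatterlist chunk in whole-block
 * multiples, and finally write 0 to %fprs so the FPU is marked unused
 * again.  CRYPTO_TFM_REQ_MAY_SLEEP is cleared up front so the walk
 * helpers will not sleep while the FP registers still hold key material.
 * The decrypt paths hand the assembly "key_end", a pointer just past
 * the schedule, since the inverse rounds consume round keys in reverse.
 */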
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	ctx->ops->load_encrypt_keys(&ctx->key[0]);
	while ((nbytes = walk.nbytes)) {
		unsigned int block_len = nbytes & AES_BLOCK_MASK;

		if (likely(block_len)) {
			ctx->ops->ecb_encrypt(&ctx->key[0],
					      (const u64 *)walk.src.virt.addr,
					      (u64 *) walk.dst.virt.addr,
					      block_len);
		}
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	fprs_write(0);
	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	u64 *key_end;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	ctx->ops->load_decrypt_keys(&ctx->key[0]);
	key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
	while ((nbytes = walk.nbytes)) {
		unsigned int block_len = nbytes & AES_BLOCK_MASK;

		if (likely(block_len)) {
			ctx->ops->ecb_decrypt(key_end,
					      (const u64 *) walk.src.virt.addr,
					      (u64 *) walk.dst.virt.addr, block_len);
		}
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	fprs_write(0);

	return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	ctx->ops->load_encrypt_keys(&ctx->key[0]);
	while ((nbytes = walk.nbytes)) {
		unsigned int block_len = nbytes & AES_BLOCK_MASK;

		if (likely(block_len)) {
			ctx->ops->cbc_encrypt(&ctx->key[0],
					      (const u64 *)walk.src.virt.addr,
					      (u64 *) walk.dst.virt.addr,
					      block_len, (u64 *) walk.iv);
		}
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	fprs_write(0);
	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	u64 *key_end;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	ctx->ops->load_decrypt_keys(&ctx->key[0]);
	key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
	while ((nbytes = walk.nbytes)) {
		unsigned int block_len = nbytes & AES_BLOCK_MASK;

		if (likely(block_len)) {
			ctx->ops->cbc_decrypt(key_end,
					      (const u64 *) walk.src.virt.addr,
					      (u64 *) walk.dst.virt.addr,
					      block_len, (u64 *) walk.iv);
		}
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	fprs_write(0);

	return err;
}

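/*
 * Encrypt the final, possibly partial, CTR block: generate one full
 * block of keystream from the counter and XOR in only the bytes that
 * remain.
 */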
static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk,
			      keystream, AES_BLOCK_SIZE);
	crypto_xor((u8 *) keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	ctx->ops->load_encrypt_keys(&ctx->key[0]);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		unsigned int block_len = nbytes & AES_BLOCK_MASK;

		if (likely(block_len)) {
			ctx->ops->ctr_crypt(&ctx->key[0],
					    (const u64 *)walk.src.virt.addr,
					    (u64 *) walk.dst.virt.addr,
					    block_len, (u64 *) walk.iv);
		}
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	fprs_write(0);
	return err;
}

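/*
 * The bare cipher plus ECB, CBC and CTR modes.  SPARC_CR_OPCODE_PRIORITY
 * registers these above the generic C implementations, so they win the
 * "aes" lookups on capable hardware.
 */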
static struct crypto_alg algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-sparc64",
	.cra_priority		= SPARC_CR_OPCODE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_sparc64_aes_ctx),
	.cra_alignmask		= 3,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-sparc64",
	.cra_priority		= SPARC_CR_OPCODE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_sparc64_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-sparc64",
	.cra_priority		= SPARC_CR_OPCODE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_sparc64_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-sparc64",
	.cra_priority		= SPARC_CR_OPCODE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
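	/* CTR is a stream mode, so partial final blocks are legal. */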
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_sparc64_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
} };
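
/*
 * These algorithms are consumed through the generic crypto API, never
 * by calling the helpers above directly.  A minimal sketch of a
 * hypothetical in-kernel caller of this era (the key/iv/scatterlist
 * variables are assumed, error handling elided):
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	crypto_blkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);
 *	crypto_blkcipher_encrypt(&desc, &sg_dst, &sg_src, nbytes);
 *	crypto_free_blkcipher(tfm);
 *
 * With "cbc-aes-sparc64" registered at higher priority, the "cbc(aes)"
 * lookup resolves to this driver on AES-capable cpus.
 */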
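/*
 * %asr26 is the Compatibility Feature Register on crypto-capable
 * sparc64 chips; CFR_AES is set when the AES opcodes are implemented.
 */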
static bool __init sparc64_has_aes_opcode(void)
{
	unsigned long cfr;

	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
		return false;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
	if (!(cfr & CFR_AES))
		return false;

	return true;
}

static int __init aes_sparc64_mod_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		INIT_LIST_HEAD(&algs[i].cra_list);

	if (sparc64_has_aes_opcode()) {
		pr_info("Using sparc64 aes opcodes optimized AES implementation\n");
		return crypto_register_algs(algs, ARRAY_SIZE(algs));
	}
	pr_info("sparc64 aes opcodes not available.\n");
	return -ENODEV;
}

static void __exit aes_sparc64_mod_fini(void)
{
	crypto_unregister_algs(algs, ARRAY_SIZE(algs));
}

module_init(aes_sparc64_mod_init);
module_exit(aes_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, sparc64 aes opcode accelerated");

MODULE_ALIAS("aes");

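/*
 * crop_devid.c provides a dummy of_device_id table matching sun4v cpu
 * nodes so that userspace autoloads this module on capable machines.
 */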
#include "crop_devid.c"