blob: 954f59eeb7b4337ceba3ee520153b37c2540650d [file] [log] [blame]
/*
 * Glue Code for 3-way parallel assembler optimized version of Twofish
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */
27
28#include <linux/crypto.h>
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/types.h>
32#include <crypto/algapi.h>
33#include <crypto/twofish.h>
34#include <crypto/b128ops.h>
Jussi Kivilinna81559f92011-10-18 13:33:02 +030035#include <crypto/lrw.h>
Jussi Kivilinnabae6d302011-10-18 13:33:43 +030036#include <crypto/xts.h>
Jussi Kivilinna81559f92011-10-18 13:33:02 +030037
38#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
39#define HAS_LRW
40#endif
Jussi Kivilinna8280daa2011-09-26 16:47:25 +030041
Jussi Kivilinnabae6d302011-10-18 13:33:43 +030042#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
43#define HAS_XTS
44#endif
45
Jussi Kivilinna8280daa2011-09-26 16:47:25 +030046/* regular block cipher functions from twofish_x86_64 module */
47asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
48 const u8 *src);
49asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
50 const u8 *src);
51
52/* 3-way parallel cipher functions */
53asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
54 const u8 *src, bool xor);
55asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
56 const u8 *src);
57
/* Encrypt three blocks: plain (non-xor) variant of the 3-way asm routine. */
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, false);
}
63
/*
 * Encrypt three counter blocks and XOR the result into dst (used by CTR:
 * dst already holds the plaintext copy when this is called).
 */
static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst,
					    const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, true);
}
69
/*
 * Generic ECB walker shared by encrypt and decrypt: processes data in
 * 3-block batches with the parallel asm routine (@fn_3way) and falls
 * back to single-block calls (@fn) for the tail of each walk segment.
 */
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     void (*fn)(struct twofish_ctx *, u8 *, const u8 *),
		     void (*fn_3way)(struct twofish_ctx *, u8 *, const u8 *))
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		/* Process three block batch */
		if (nbytes >= bsize * 3) {
			do {
				fn_3way(ctx, wdst, wsrc);

				wsrc += bsize * 3;
				wdst += bsize * 3;
				nbytes -= bsize * 3;
			} while (nbytes >= bsize * 3);

			/* fewer than one block left: hand back to walker */
			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers (1 or 2 whole blocks) */
		do {
			fn(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	return err;
}
114
115static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
116 struct scatterlist *src, unsigned int nbytes)
117{
118 struct blkcipher_walk walk;
119
120 blkcipher_walk_init(&walk, dst, src, nbytes);
121 return ecb_crypt(desc, &walk, twofish_enc_blk, twofish_enc_blk_3way);
122}
123
124static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
125 struct scatterlist *src, unsigned int nbytes)
126{
127 struct blkcipher_walk walk;
128
129 blkcipher_walk_init(&walk, dst, src, nbytes);
130 return ecb_crypt(desc, &walk, twofish_dec_blk, twofish_dec_blk_3way);
131}
132
/* ecb(twofish) descriptor served by the 3-way parallel implementation. */
static struct crypto_alg blk_ecb_alg = {
	.cra_name		= "ecb(twofish)",
	.cra_driver_name	= "ecb-twofish-3way",
	.cra_priority		= 300,	/* prefer over generic twofish */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
};
154
155static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
156 struct blkcipher_walk *walk)
157{
158 struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
159 unsigned int bsize = TF_BLOCK_SIZE;
160 unsigned int nbytes = walk->nbytes;
161 u128 *src = (u128 *)walk->src.virt.addr;
162 u128 *dst = (u128 *)walk->dst.virt.addr;
163 u128 *iv = (u128 *)walk->iv;
164
165 do {
166 u128_xor(dst, src, iv);
167 twofish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
168 iv = dst;
169
170 src += 1;
171 dst += 1;
172 nbytes -= bsize;
173 } while (nbytes >= bsize);
174
175 u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
176 return nbytes;
177}
178
179static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
180 struct scatterlist *src, unsigned int nbytes)
181{
182 struct blkcipher_walk walk;
183 int err;
184
185 blkcipher_walk_init(&walk, dst, src, nbytes);
186 err = blkcipher_walk_virt(desc, &walk);
187
188 while ((nbytes = walk.nbytes)) {
189 nbytes = __cbc_encrypt(desc, &walk);
190 err = blkcipher_walk_done(desc, &walk, nbytes);
191 }
192
193 return err;
194}
195
/*
 * Decrypt one contiguous walk segment in CBC mode, walking backwards
 * from the last block so each ciphertext block is still available to
 * XOR into its successor after (possibly in-place) decryption.
 * Returns the number of unprocessed tail bytes (< bsize).
 */
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ivs[3 - 1];	/* saved ciphertexts for the 3-way batch */
	u128 last_iv;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	/* The last ciphertext block becomes the next segment's IV; save
	 * it now since in-place decryption will overwrite it. */
	last_iv = *src;

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			nbytes -= bsize * (3 - 1);
			src -= 3 - 1;
			dst -= 3 - 1;

			/* src[0]/src[1] may alias dst[1]/dst[2] when
			 * in-place; copy them before decrypting so the
			 * chaining XOR below sees the ciphertext. */
			ivs[0] = src[0];
			ivs[1] = src[1];

			twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);

			u128_xor(dst + 1, dst + 1, ivs + 0);
			u128_xor(dst + 2, dst + 2, ivs + 1);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		twofish_dec_blk(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		u128_xor(dst, dst, src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	/* First block of the segment chains off the incoming IV. */
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}
260
261static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
262 struct scatterlist *src, unsigned int nbytes)
263{
264 struct blkcipher_walk walk;
265 int err;
266
267 blkcipher_walk_init(&walk, dst, src, nbytes);
268 err = blkcipher_walk_virt(desc, &walk);
269
270 while ((nbytes = walk.nbytes)) {
271 nbytes = __cbc_decrypt(desc, &walk);
272 err = blkcipher_walk_done(desc, &walk, nbytes);
273 }
274
275 return err;
276}
277
/* cbc(twofish) descriptor; decryption uses the 3-way parallel path. */
static struct crypto_alg blk_cbc_alg = {
	.cra_name		= "cbc(twofish)",
	.cra_driver_name	= "cbc-twofish-3way",
	.cra_priority		= 300,	/* prefer over generic twofish */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
};
300
/* Store the native-endian 128-bit value @src as big-endian at @dst. */
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}
306
/* Load the big-endian 128-bit value @src into native endianness at @dst. */
static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}
312
313static inline void u128_inc(u128 *i)
314{
315 i->b++;
316 if (!i->b)
317 i->a++;
318}
319
/*
 * Finish a CTR request whose tail is a partial block: generate one
 * keystream block from the counter and XOR only walk->nbytes of it
 * into the destination, then advance the counter.
 */
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[TF_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	twofish_enc_blk(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	/* keep walk->iv consistent even after the partial block */
	crypto_inc(ctrblk, TF_BLOCK_SIZE);
}
336
/*
 * Encrypt/decrypt one full-block walk segment in CTR mode.  The counter
 * is kept native-endian in @ctrblk and converted to big-endian per
 * block; three counter blocks are produced at a time for the parallel
 * encrypt-and-xor asm routine.  Returns the remaining (< bsize) bytes.
 */
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	be128 ctrblocks[3];

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			/* the xor variant reads dst, so mirror the
			 * plaintext into dst first when not in-place */
			if (dst != src) {
				dst[0] = src[0];
				dst[1] = src[1];
				dst[2] = src[2];
			}

			/* create ctrblks for parallel encrypt */
			u128_to_be128(&ctrblocks[0], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[1], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[2], &ctrblk);
			u128_inc(&ctrblk);

			twofish_enc_blk_xor_3way(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += 3;
			dst += 3;
			nbytes -= bsize * 3;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers: encrypt counter in-place, XOR into dst */
	do {
		if (dst != src)
			*dst = *src;

		u128_to_be128(&ctrblocks[0], &ctrblk);
		u128_inc(&ctrblk);

		twofish_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		u128_xor(dst, dst, (u128 *)ctrblocks);

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	/* store the advanced counter back for the next segment */
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}
399
/*
 * CTR mode entry point (same routine for encrypt and decrypt).  Walks
 * in TF_BLOCK_SIZE granularity; a trailing partial block — legal in
 * CTR, hence cra_blocksize = 1 — is finished by ctr_crypt_final().
 */
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, TF_BLOCK_SIZE);

	while ((nbytes = walk.nbytes) >= TF_BLOCK_SIZE) {
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	/* partial final block, if any */
	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
421
/* ctr(twofish) descriptor; blocksize 1 because CTR acts as a stream cipher. */
static struct crypto_alg blk_ctr_alg = {
	.cra_name		= "ctr(twofish)",
	.cra_driver_name	= "ctr-twofish-3way",
	.cra_priority		= 300,	/* prefer over generic twofish */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
};
444
Jussi Kivilinnabae6d302011-10-18 13:33:43 +0300445#if defined(HAS_LRW) || defined(HAS_XTS)
Jussi Kivilinna81559f92011-10-18 13:33:02 +0300446
447static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
448{
449 const unsigned int bsize = TF_BLOCK_SIZE;
450 struct twofish_ctx *ctx = priv;
451 int i;
452
453 if (nbytes == 3 * bsize) {
454 twofish_enc_blk_3way(ctx, srcdst, srcdst);
455 return;
456 }
457
458 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
459 twofish_enc_blk(ctx, srcdst, srcdst);
460}
461
462static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
463{
464 const unsigned int bsize = TF_BLOCK_SIZE;
465 struct twofish_ctx *ctx = priv;
466 int i;
467
468 if (nbytes == 3 * bsize) {
469 twofish_dec_blk_3way(ctx, srcdst, srcdst);
470 return;
471 }
472
473 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
474 twofish_dec_blk(ctx, srcdst, srcdst);
475}
476
Jussi Kivilinnabae6d302011-10-18 13:33:43 +0300477#endif
478
479#ifdef HAS_LRW
480
/* Per-tfm context for lrw(twofish): LRW tweak table plus cipher keys. */
struct twofish_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct twofish_ctx twofish_ctx;
};
485
/*
 * lrw(twofish) setkey: the trailing TF_BLOCK_SIZE bytes of @key are the
 * LRW tweak key; the remainder is the regular twofish cipher key.
 */
static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
			       &tfm->crt_flags);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
}
499
/* LRW encryption: delegate to the generic lrw_crypt() helper. */
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];	/* tweak buffer sized for one 3-way batch */
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &ctx->twofish_ctx,
		.crypt_fn = encrypt_callback,
	};

	return lrw_crypt(desc, dst, src, nbytes, &req);
}
516
/* LRW decryption: delegate to the generic lrw_crypt() helper. */
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];	/* tweak buffer sized for one 3-way batch */
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &ctx->twofish_ctx,
		.crypt_fn = decrypt_callback,
	};

	return lrw_crypt(desc, dst, src, nbytes, &req);
}
533
/* Release the LRW multiplication table when the tfm is torn down. */
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
540
/* lrw(twofish) descriptor; key carries an extra block for the LRW tweak. */
static struct crypto_alg blk_lrw_alg = {
	.cra_name		= "lrw(twofish)",
	.cra_driver_name	= "lrw-twofish-3way",
	.cra_priority		= 300,	/* prefer over generic twofish */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_lrw_alg.cra_list),
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= lrw_twofish_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
};
564
565#endif
566
Jussi Kivilinnabae6d302011-10-18 13:33:43 +0300567#ifdef HAS_XTS
568
/* Per-tfm context for xts(twofish): separate tweak and data-crypt keys. */
struct twofish_xts_ctx {
	struct twofish_ctx tweak_ctx;
	struct twofish_ctx crypt_ctx;
};
573
/*
 * xts(twofish) setkey: the XTS key is two equal-size twofish keys
 * concatenated — first half for data crypt, second half for the tweak.
 */
static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
				flags);
}
598
/* XTS encryption: delegate to the generic xts_crypt() helper. */
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];	/* tweak buffer sized for one 3-way batch */
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &ctx->crypt_ctx,
		.crypt_fn = encrypt_callback,
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}
616
/*
 * XTS decryption: delegate to the generic xts_crypt() helper.  Note
 * the tweak is always computed with the *encryption* primitive, per
 * the XTS specification.
 */
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];	/* tweak buffer sized for one 3-way batch */
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &ctx->crypt_ctx,
		.crypt_fn = decrypt_callback,
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}
634
/* xts(twofish) descriptor; key is double-length (crypt key + tweak key). */
static struct crypto_alg blk_xts_alg = {
	.cra_name		= "xts(twofish)",
	.cra_driver_name	= "xts-twofish-3way",
	.cra_priority		= 300,	/* prefer over generic twofish */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_xts_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE * 2,
			.max_keysize	= TF_MAX_KEY_SIZE * 2,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= xts_twofish_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
};
657
658#endif
659
Jussi Kivilinna8280daa2011-09-26 16:47:25 +0300660int __init init(void)
661{
662 int err;
663
664 err = crypto_register_alg(&blk_ecb_alg);
665 if (err)
666 goto ecb_err;
667 err = crypto_register_alg(&blk_cbc_alg);
668 if (err)
669 goto cbc_err;
670 err = crypto_register_alg(&blk_ctr_alg);
671 if (err)
672 goto ctr_err;
Jussi Kivilinna81559f92011-10-18 13:33:02 +0300673#ifdef HAS_LRW
674 err = crypto_register_alg(&blk_lrw_alg);
675 if (err)
676 goto blk_lrw_err;
677#endif
Jussi Kivilinnabae6d302011-10-18 13:33:43 +0300678#ifdef HAS_XTS
679 err = crypto_register_alg(&blk_xts_alg);
680 if (err)
681 goto blk_xts_err;
682#endif
Jussi Kivilinna8280daa2011-09-26 16:47:25 +0300683
684 return 0;
685
Jussi Kivilinnabae6d302011-10-18 13:33:43 +0300686#ifdef HAS_XTS
687 crypto_unregister_alg(&blk_xts_alg);
688blk_xts_err:
Jussi Kivilinna81559f92011-10-18 13:33:02 +0300689#endif
Jussi Kivilinnabae6d302011-10-18 13:33:43 +0300690#ifdef HAS_LRW
691 crypto_unregister_alg(&blk_lrw_alg);
692blk_lrw_err:
693#endif
694 crypto_unregister_alg(&blk_ctr_alg);
Jussi Kivilinna8280daa2011-09-26 16:47:25 +0300695ctr_err:
696 crypto_unregister_alg(&blk_cbc_alg);
697cbc_err:
698 crypto_unregister_alg(&blk_ecb_alg);
699ecb_err:
700 return err;
701}
702
703void __exit fini(void)
704{
Jussi Kivilinnabae6d302011-10-18 13:33:43 +0300705#ifdef HAS_XTS
706 crypto_unregister_alg(&blk_xts_alg);
707#endif
Jussi Kivilinna81559f92011-10-18 13:33:02 +0300708#ifdef HAS_LRW
709 crypto_unregister_alg(&blk_lrw_alg);
710#endif
Jussi Kivilinna8280daa2011-09-26 16:47:25 +0300711 crypto_unregister_alg(&blk_ctr_alg);
712 crypto_unregister_alg(&blk_cbc_alg);
713 crypto_unregister_alg(&blk_ecb_alg);
714}
715
716module_init(init);
717module_exit(fini);
718
719MODULE_LICENSE("GPL");
720MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
721MODULE_ALIAS("twofish");
722MODULE_ALIAS("twofish-asm");