/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/serpent-sse2.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

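/*
 * The kernel must not touch SSE2/XMM state without kernel_fpu_begin().
 * Saving that state is relatively expensive, so these helpers delay it
 * until a chunk large enough for the multi-block (xway) code arrives,
 * and let callers keep one FPU section open across walk iterations.
 */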
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	if (fpu_enabled)
		return true;

	/* SSE2 is only used when chunk to be processed is large enough, so
	 * do not enable FPU until it is necessary.
	 */
	if (nbytes < SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS)
		return false;

	kernel_fpu_begin();
	return true;
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
	if (fpu_enabled)
		kernel_fpu_end();
}

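/*
 * Shared ECB walk for both directions: batches of SERPENT_PARALLEL_BLOCKS
 * go through the SSE2 xway routines, any tail is handled one block at a
 * time by the generic cipher.
 */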
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     bool enc)
{
	bool fpu_enabled = false;
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);

		/* Process multi-block batch */
		if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
			do {
				if (enc)
					serpent_enc_blk_xway(ctx, wdst, wsrc);
				else
					serpent_dec_blk_xway(ctx, wdst, wsrc);

				wsrc += bsize * SERPENT_PARALLEL_BLOCKS;
				wdst += bsize * SERPENT_PARALLEL_BLOCKS;
				nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
			} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			if (enc)
				__serpent_encrypt(ctx, wdst, wsrc);
			else
				__serpent_decrypt(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);
	return err;
}

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, true);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, false);
}

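/*
 * CBC encryption is inherently serial (each block chains off the previous
 * ciphertext block), so there is no xway path here and the FPU is never
 * needed.
 */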
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		__serpent_encrypt(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	/* The last ciphertext block becomes the IV for the next segment. */
	*(u128 *)walk->iv = *iv;
	return nbytes;
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_encrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

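/*
 * CBC decryption does parallelize.  The walk segment is processed from the
 * last block backwards so that in-place (dst == src) operation is safe,
 * decrypting up to SERPENT_PARALLEL_BLOCKS blocks at once and then XORing
 * each result with the preceding ciphertext block.  The last ciphertext
 * block is saved up front to become the next IV.
 */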
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
	u128 last_iv;
	int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	/* Process multi-block batch */
	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
		do {
			nbytes -= bsize * (SERPENT_PARALLEL_BLOCKS - 1);
			src -= SERPENT_PARALLEL_BLOCKS - 1;
			dst -= SERPENT_PARALLEL_BLOCKS - 1;

			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
				ivs[i] = src[i];

			serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
				u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		__serpent_decrypt(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		u128_xor(dst, dst, src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
		nbytes = __cbc_decrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);
	return err;
}

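/*
 * CTR mode keeps the counter as a native-endian u128 and converts to and
 * from the big-endian wire format around each encryption.
 */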
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}

static inline void u128_inc(u128 *i)
{
	i->b++;
	if (!i->b)
		i->a++;
}

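/*
 * Handle the final partial block of a CTR request: encrypt one counter
 * block into a keystream buffer and XOR just the remaining nbytes into
 * the output.
 */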
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[SERPENT_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	__serpent_encrypt(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, SERPENT_BLOCK_SIZE);
}

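/*
 * Bulk CTR path: build SERPENT_PARALLEL_BLOCKS counter blocks at a time;
 * serpent_enc_blk_xway_xor() encrypts them and XORs the result into dst,
 * which has been pre-filled with the plaintext when not operating
 * in-place.
 */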
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	be128 ctrblocks[SERPENT_PARALLEL_BLOCKS];
	int i;

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
		do {
			/* create ctrblks for parallel encrypt */
			for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
				if (dst != src)
					dst[i] = src[i];

				u128_to_be128(&ctrblocks[i], &ctrblk);
				u128_inc(&ctrblk);
			}

			serpent_enc_blk_xway_xor(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += SERPENT_PARALLEL_BLOCKS;
			dst += SERPENT_PARALLEL_BLOCKS;
			nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		u128_to_be128(&ctrblocks[0], &ctrblk);
		u128_inc(&ctrblk);

		__serpent_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		u128_xor(dst, dst, (u128 *)ctrblocks);

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, SERPENT_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes) >= SERPENT_BLOCK_SIZE) {
		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}

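/*
 * LRW and XTS are built on lrw_crypt()/xts_crypt(), which call back into
 * the routines below for the raw ECB passes.  struct crypt_priv threads
 * the lazy-FPU state through those callbacks.
 */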
struct crypt_priv {
	struct serpent_ctx *ctx;
	bool fpu_enabled;
};

static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
}

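/*
 * An LRW key is a Serpent key with one extra cipher block appended; the
 * trailing block keys the GF(2^128) tweak multiplication table.
 */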
struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct serpent_ctx serpent_ctx;
};

static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
							SERPENT_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen -
						SERPENT_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

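/*
 * An XTS key is two independent Serpent keys of equal length: the first
 * half for the data, the second half for the tweak.
 */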
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;
	struct serpent_ctx crypt_ctx;
};

static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

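/*
 * Each mode is registered twice: an internal "__"-prefixed synchronous
 * blkcipher at priority 0, and the user-visible ablkcipher wrapper at
 * priority 400 that goes through the ablk/cryptd helpers so FPU-touching
 * work can be deferred from contexts where the FPU is unusable.
 */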
static struct crypto_alg serpent_algs[10] = { {
	.cra_name		= "__ecb-serpent-sse2",
	.cra_driver_name	= "__driver-ecb-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[0].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-serpent-sse2",
	.cra_driver_name	= "__driver-cbc-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[1].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-serpent-sse2",
	.cra_driver_name	= "__driver-ctr-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[2].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-serpent-sse2",
	.cra_driver_name	= "__driver-lrw-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[3].cra_list),
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= lrw_serpent_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-serpent-sse2",
	.cra_driver_name	= "__driver-xts-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[4].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= xts_serpent_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(serpent)",
	.cra_driver_name	= "ecb-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[5].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(serpent)",
	.cra_driver_name	= "cbc-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[6].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(serpent)",
	.cra_driver_name	= "ctr-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[7].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	.cra_name		= "lrw(serpent)",
	.cra_driver_name	= "lrw-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[8].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(serpent)",
	.cra_driver_name	= "xts-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[9].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };

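/* Refuse to load on CPUs without SSE2; otherwise register all ten algorithms. */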
static int __init serpent_sse2_init(void)
{
	if (!cpu_has_xmm2) {
		printk(KERN_INFO "SSE2 instructions are not detected.\n");
		return -ENODEV;
	}

	return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

static void __exit serpent_sse2_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");