/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/serpent.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif

struct async_serpent_ctx {
        struct cryptd_ablkcipher *cryptd_tfm;
};

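/*
 * kernel_fpu_begin()/kernel_fpu_end() are relatively expensive and disable
 * preemption, so the FPU is claimed lazily: only once a chunk large enough
 * for the parallel SSE2 code path is seen, and it is released once at the
 * end of the walk.
 */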
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
        if (fpu_enabled)
                return true;

        /* SSE2 is only used when the chunk to be processed is large enough,
         * so do not enable the FPU until it is necessary.
         */
        if (nbytes < SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS)
                return false;

        kernel_fpu_begin();
        return true;
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
        if (fpu_enabled)
                kernel_fpu_end();
}

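/*
 * ECB helper: walk the request and process full batches of
 * SERPENT_PARALLEL_BLOCKS blocks with the SSE2 implementation; any remaining
 * blocks are handled one at a time with the generic C implementation.
 */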
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
                     bool enc)
{
        bool fpu_enabled = false;
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        unsigned int nbytes;
        int err;

        err = blkcipher_walk_virt(desc, walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk->nbytes)) {
                u8 *wsrc = walk->src.virt.addr;
                u8 *wdst = walk->dst.virt.addr;

                fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);

                /* Process multi-block batch */
                if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
                        do {
                                if (enc)
                                        serpent_enc_blk_xway(ctx, wdst, wsrc);
                                else
                                        serpent_dec_blk_xway(ctx, wdst, wsrc);

                                wsrc += bsize * SERPENT_PARALLEL_BLOCKS;
                                wdst += bsize * SERPENT_PARALLEL_BLOCKS;
                                nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
                        } while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

                        if (nbytes < bsize)
                                goto done;
                }

                /* Handle leftovers */
                do {
                        if (enc)
                                __serpent_encrypt(ctx, wdst, wsrc);
                        else
                                __serpent_decrypt(ctx, wdst, wsrc);

                        wsrc += bsize;
                        wdst += bsize;
                        nbytes -= bsize;
                } while (nbytes >= bsize);

done:
                err = blkcipher_walk_done(desc, walk, nbytes);
        }

        serpent_fpu_end(fpu_enabled);
        return err;
}

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_crypt(desc, &walk, true);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_crypt(desc, &walk, false);
}

static struct crypto_alg blk_ecb_alg = {
        .cra_name = "__ecb-serpent-sse2",
        .cra_driver_name = "__driver-ecb-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .setkey = serpent_setkey,
                        .encrypt = ecb_encrypt,
                        .decrypt = ecb_decrypt,
                },
        },
};

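/*
 * CBC encryption cannot be parallelized (every block depends on the previous
 * ciphertext block), so it is done block by block with the generic
 * implementation and never touches the FPU.
 */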
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
                                  struct blkcipher_walk *walk)
{
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 *iv = (u128 *)walk->iv;

        do {
                u128_xor(dst, src, iv);
                __serpent_encrypt(ctx, (u8 *)dst, (u8 *)dst);
                iv = dst;

                src += 1;
                dst += 1;
                nbytes -= bsize;
        } while (nbytes >= bsize);

        *(u128 *)walk->iv = *iv;
        return nbytes;
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                nbytes = __cbc_encrypt(desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

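/*
 * CBC decryption is parallelizable: the data is processed from the last block
 * towards the first so that a full batch can be decrypted with one SSE2 call
 * and then XORed with the preceding ciphertext blocks.
 */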
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
                                  struct blkcipher_walk *walk)
{
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
        u128 last_iv;
        int i;

        /* Start of the last block. */
        src += nbytes / bsize - 1;
        dst += nbytes / bsize - 1;

        last_iv = *src;

        /* Process multi-block batch */
        if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
                do {
                        nbytes -= bsize * (SERPENT_PARALLEL_BLOCKS - 1);
                        src -= SERPENT_PARALLEL_BLOCKS - 1;
                        dst -= SERPENT_PARALLEL_BLOCKS - 1;

                        for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
                                ivs[i] = src[i];

                        serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

                        for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
                                u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);

                        nbytes -= bsize;
                        if (nbytes < bsize)
                                goto done;

                        u128_xor(dst, dst, src - 1);
                        src -= 1;
                        dst -= 1;
                } while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

                if (nbytes < bsize)
                        goto done;
        }

        /* Handle leftovers */
        for (;;) {
                __serpent_decrypt(ctx, (u8 *)dst, (u8 *)src);

                nbytes -= bsize;
                if (nbytes < bsize)
                        break;

                u128_xor(dst, dst, src - 1);
                src -= 1;
                dst -= 1;
        }

done:
        u128_xor(dst, dst, (u128 *)walk->iv);
        *(u128 *)walk->iv = last_iv;

        return nbytes;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk.nbytes)) {
                fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
                nbytes = __cbc_decrypt(desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        serpent_fpu_end(fpu_enabled);
        return err;
}

static struct crypto_alg blk_cbc_alg = {
        .cra_name = "__cbc-serpent-sse2",
        .cra_driver_name = "__driver-cbc-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .setkey = serpent_setkey,
                        .encrypt = cbc_encrypt,
                        .decrypt = cbc_decrypt,
                },
        },
};

static inline void u128_to_be128(be128 *dst, const u128 *src)
{
        dst->a = cpu_to_be64(src->a);
        dst->b = cpu_to_be64(src->b);
}

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
        dst->a = be64_to_cpu(src->a);
        dst->b = be64_to_cpu(src->b);
}

static inline void u128_inc(u128 *i)
{
        i->b++;
        if (!i->b)
                i->a++;
}

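/*
 * Handle the final, partial block of a CTR request: encrypt the counter to
 * produce one keystream block and XOR only the remaining bytes of data.
 */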
static void ctr_crypt_final(struct blkcipher_desc *desc,
                            struct blkcipher_walk *walk)
{
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        u8 *ctrblk = walk->iv;
        u8 keystream[SERPENT_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        __serpent_encrypt(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);

        crypto_inc(ctrblk, SERPENT_BLOCK_SIZE);
}

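/*
 * CTR helper: build a batch of big-endian counter blocks, then use
 * serpent_enc_blk_xway_xor() to encrypt them and XOR the resulting keystream
 * into the data in one SSE2 call; leftovers are handled one block at a time.
 */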
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 ctrblk;
        be128 ctrblocks[SERPENT_PARALLEL_BLOCKS];
        int i;

        be128_to_u128(&ctrblk, (be128 *)walk->iv);

        /* Process multi-block batch */
        if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
                do {
                        /* create ctrblks for parallel encrypt */
                        for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
                                if (dst != src)
                                        dst[i] = src[i];

                                u128_to_be128(&ctrblocks[i], &ctrblk);
                                u128_inc(&ctrblk);
                        }

                        serpent_enc_blk_xway_xor(ctx, (u8 *)dst,
                                                 (u8 *)ctrblocks);

                        src += SERPENT_PARALLEL_BLOCKS;
                        dst += SERPENT_PARALLEL_BLOCKS;
                        nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
                } while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

                if (nbytes < bsize)
                        goto done;
        }

        /* Handle leftovers */
        do {
                if (dst != src)
                        *dst = *src;

                u128_to_be128(&ctrblocks[0], &ctrblk);
                u128_inc(&ctrblk);

                __serpent_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
                u128_xor(dst, dst, (u128 *)ctrblocks);

                src += 1;
                dst += 1;
                nbytes -= bsize;
        } while (nbytes >= bsize);

done:
        u128_to_be128((be128 *)walk->iv, &ctrblk);
        return nbytes;
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                     struct scatterlist *src, unsigned int nbytes)
{
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, SERPENT_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk.nbytes) >= SERPENT_BLOCK_SIZE) {
                fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
                nbytes = __ctr_crypt(desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        serpent_fpu_end(fpu_enabled);

        if (walk.nbytes) {
                ctr_crypt_final(desc, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }

        return err;
}

static struct crypto_alg blk_ctr_alg = {
        .cra_name = "__ctr-serpent-sse2",
        .cra_driver_name = "__driver-ctr-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = serpent_setkey,
                        .encrypt = ctr_crypt,
                        .decrypt = ctr_crypt,
                },
        },
};

#if defined(HAS_LRW) || defined(HAS_XTS)

struct crypt_priv {
        struct serpent_ctx *ctx;
        bool fpu_enabled;
};

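/*
 * Callbacks invoked by lrw_crypt()/xts_crypt() on contiguous chunks of data:
 * a chunk of exactly SERPENT_PARALLEL_BLOCKS blocks is handled with one SSE2
 * call, anything smaller block by block.
 */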
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        struct crypt_priv *ctx = priv;
        int i;

        ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

        if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
                serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
                return;
        }

        for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
                __serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        struct crypt_priv *ctx = priv;
        int i;

        ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

        if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
                serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
                return;
        }

        for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
                __serpent_decrypt(ctx->ctx, srcdst, srcdst);
}

#endif

#ifdef HAS_LRW

struct serpent_lrw_ctx {
        struct lrw_table_ctx lrw_table;
        struct serpent_ctx serpent_ctx;
};

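/*
 * The LRW key material is the Serpent key followed by one block of tweak key;
 * the last SERPENT_BLOCK_SIZE bytes initialize the LRW multiplication table.
 */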
static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
                                                        SERPENT_BLOCK_SIZE);
        if (err)
                return err;

        return lrw_init_table(&ctx->lrw_table, key + keylen -
                                                SERPENT_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->serpent_ctx,
                .fpu_enabled = false,
        };
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = encrypt_callback,
        };
        int ret;

        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->serpent_ctx,
                .fpu_enabled = false,
        };
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = decrypt_callback,
        };
        int ret;

        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
        struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}

static struct crypto_alg blk_lrw_alg = {
        .cra_name = "__lrw-serpent-sse2",
        .cra_driver_name = "__driver-lrw-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(blk_lrw_alg.cra_list),
        .cra_exit = lrw_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE +
                                       SERPENT_BLOCK_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE +
                                       SERPENT_BLOCK_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = lrw_serpent_setkey,
                        .encrypt = lrw_encrypt,
                        .decrypt = lrw_decrypt,
                },
        },
};

#endif

#ifdef HAS_XTS

struct serpent_xts_ctx {
        struct serpent_ctx tweak_ctx;
        struct serpent_ctx crypt_ctx;
};

static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int err;

        /* key consists of keys of equal size concatenated, therefore
         * the length must be even
         */
        if (keylen % 2) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* first half of xts-key is for crypt */
        err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->crypt_ctx,
                .fpu_enabled = false,
        };
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = &ctx->tweak_ctx,
                .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = encrypt_callback,
        };
        int ret;

        ret = xts_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->crypt_ctx,
                .fpu_enabled = false,
        };
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = &ctx->tweak_ctx,
                .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = decrypt_callback,
        };
        int ret;

        ret = xts_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static struct crypto_alg blk_xts_alg = {
        .cra_name = "__xts-serpent-sse2",
        .cra_driver_name = "__driver-xts-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct serpent_xts_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(blk_xts_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
                        .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = xts_serpent_setkey,
                        .encrypt = xts_encrypt,
                        .decrypt = xts_decrypt,
                },
        },
};

#endif

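/*
 * The ablk_* functions implement the outer, asynchronous "serpent" algorithms
 * that are exposed to users.  When the FPU is usable the request is handled
 * synchronously through the internal blkcipher; otherwise (e.g. when called
 * from interrupt context) it is deferred to the cryptd workqueue.
 */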
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int key_len)
{
        struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
        int err;

        crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
                                    & CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(child, key, key_len);
        crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
                                    & CRYPTO_TFM_RES_MASK);
        return err;
}

static int __ablk_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct blkcipher_desc desc;

        desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
        desc.info = req->info;
        desc.flags = 0;

        return crypto_blkcipher_crt(desc.tfm)->encrypt(
                &desc, req->dst, req->src, req->nbytes);
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);

                memcpy(cryptd_req, req, sizeof(*req));
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

                return crypto_ablkcipher_encrypt(cryptd_req);
        } else {
                return __ablk_encrypt(req);
        }
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);

                memcpy(cryptd_req, req, sizeof(*req));
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

                return crypto_ablkcipher_decrypt(cryptd_req);
        } else {
                struct blkcipher_desc desc;

                desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
                desc.info = req->info;
                desc.flags = 0;

                return crypto_blkcipher_crt(desc.tfm)->decrypt(
                        &desc, req->dst, req->src, req->nbytes);
        }
}

static void ablk_exit(struct crypto_tfm *tfm)
{
        struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);

        cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static void ablk_init_common(struct crypto_tfm *tfm,
                             struct cryptd_ablkcipher *cryptd_tfm)
{
        struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
                crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

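/*
 * Each exposed algorithm is bound to its internal "__driver-*" counterpart
 * through a cryptd ablkcipher instance allocated at init time.
 */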
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_ecb_alg = {
        .cra_name = "ecb(serpent)",
        .cra_driver_name = "ecb-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
        .cra_init = ablk_ecb_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_cbc_alg = {
        .cra_name = "cbc(serpent)",
        .cra_driver_name = "cbc-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
        .cra_init = ablk_cbc_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = ablk_set_key,
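                        /* CBC encryption never uses SSE2, so it can always
                         * run synchronously without the FPU check. */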
                        .encrypt = __ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
};

static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_ctr_alg = {
        .cra_name = "ctr(serpent)",
        .cra_driver_name = "ctr-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct async_serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
        .cra_init = ablk_ctr_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
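                        /* CTR mode decryption is the same keystream
                         * operation as encryption. */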
                        .decrypt = ablk_encrypt,
                        .geniv = "chainiv",
                },
        },
};

#ifdef HAS_LRW

static int ablk_lrw_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-lrw-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_lrw_alg = {
        .cra_name = "lrw(serpent)",
        .cra_driver_name = "lrw-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
        .cra_init = ablk_lrw_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE +
                                       SERPENT_BLOCK_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE +
                                       SERPENT_BLOCK_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
};

#endif

#ifdef HAS_XTS

static int ablk_xts_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-xts-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_xts_alg = {
        .cra_name = "xts(serpent)",
        .cra_driver_name = "xts-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
        .cra_init = ablk_xts_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
                        .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
};

#endif

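/*
 * Module init: check for SSE2, then register the internal blkcipher
 * implementations followed by the async wrappers that depend on them,
 * unwinding in reverse order on failure.
 */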
static int __init serpent_sse2_init(void)
{
        int err;

        if (!cpu_has_xmm2) {
                printk(KERN_INFO "SSE2 instructions are not detected.\n");
                return -ENODEV;
        }

        err = crypto_register_alg(&blk_ecb_alg);
        if (err)
                goto blk_ecb_err;
        err = crypto_register_alg(&blk_cbc_alg);
        if (err)
                goto blk_cbc_err;
        err = crypto_register_alg(&blk_ctr_alg);
        if (err)
                goto blk_ctr_err;
        err = crypto_register_alg(&ablk_ecb_alg);
        if (err)
                goto ablk_ecb_err;
        err = crypto_register_alg(&ablk_cbc_alg);
        if (err)
                goto ablk_cbc_err;
        err = crypto_register_alg(&ablk_ctr_alg);
        if (err)
                goto ablk_ctr_err;
#ifdef HAS_LRW
        err = crypto_register_alg(&blk_lrw_alg);
        if (err)
                goto blk_lrw_err;
        err = crypto_register_alg(&ablk_lrw_alg);
        if (err)
                goto ablk_lrw_err;
#endif
#ifdef HAS_XTS
        err = crypto_register_alg(&blk_xts_alg);
        if (err)
                goto blk_xts_err;
        err = crypto_register_alg(&ablk_xts_alg);
        if (err)
                goto ablk_xts_err;
#endif
        return err;

#ifdef HAS_XTS
        crypto_unregister_alg(&ablk_xts_alg);
ablk_xts_err:
        crypto_unregister_alg(&blk_xts_alg);
blk_xts_err:
#endif
#ifdef HAS_LRW
        crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
        crypto_unregister_alg(&blk_lrw_alg);
blk_lrw_err:
#endif
        crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
        crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
        crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
        crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
        crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
        crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
        return err;
}

static void __exit serpent_sse2_exit(void)
{
#ifdef HAS_XTS
        crypto_unregister_alg(&ablk_xts_alg);
        crypto_unregister_alg(&blk_xts_alg);
#endif
#ifdef HAS_LRW
        crypto_unregister_alg(&ablk_lrw_alg);
        crypto_unregister_alg(&blk_lrw_alg);
#endif
        crypto_unregister_alg(&ablk_ctr_alg);
        crypto_unregister_alg(&ablk_cbc_alg);
        crypto_unregister_alg(&ablk_ecb_alg);
        crypto_unregister_alg(&blk_ctr_alg);
        crypto_unregister_alg(&blk_cbc_alg);
        crypto_unregister_alg(&blk_ecb_alg);
}

module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");