/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/serpent-sse2.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

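/*
 * Generic glue layer for 128-bit block ciphers: a cipher supplies one
 * routine per batch width (e.g. a SERPENT_PARALLEL_BLOCKS-wide SSE2
 * version plus a single-block C fallback), and the walk loops below
 * always try the widest batch first.
 */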
typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
				       u128 *iv);

#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))

struct common_glue_func_entry {
	unsigned int num_blocks; /* number of blocks that @fn_u will process */
	union {
		common_glue_func_t ecb;
		common_glue_cbc_func_t cbc;
		common_glue_ctr_func_t ctr;
	} fn_u;
};

struct common_glue_ctx {
	unsigned int num_funcs;
	int fpu_blocks_limit; /* -1 means fpu not needed at all */

	/*
	 * First funcs entry must have largest num_blocks and last funcs entry
	 * must have num_blocks == 1!
	 */
	struct common_glue_func_entry funcs[];
};

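/*
 * kernel_fpu_begin() claims the FPU and disables preemption, so code
 * running between glue_fpu_begin() and glue_fpu_end() must not sleep;
 * that is why CRYPTO_TFM_REQ_MAY_SLEEP is cleared once the FPU is taken.
 */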
static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
				  struct blkcipher_desc *desc,
				  bool fpu_enabled, unsigned int nbytes)
{
	if (likely(fpu_blocks_limit < 0))
		return false;

	if (fpu_enabled)
		return true;

	/*
	 * Vector registers are only used when the chunk to be processed is
	 * large enough, so do not enable the FPU until it is necessary.
	 */
	if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
		return false;

	if (desc) {
		/* prevent sleeping if FPU is in use */
		desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	}

	kernel_fpu_begin();
	return true;
}

static inline void glue_fpu_end(bool fpu_enabled)
{
	if (fpu_enabled)
		kernel_fpu_end();
}

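/*
 * ECB walker: each scatterlist chunk is consumed with the widest batch
 * that still fits, falling through to narrower batches (down to one
 * block) for the tail.
 */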
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
				   struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes, i, func_bytes;
	bool fpu_enabled = false;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);

		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			/* Process multi-block batch */
			if (nbytes >= func_bytes) {
				do {
					gctx->funcs[i].fn_u.ecb(ctx, wdst,
								wsrc);

					wsrc += func_bytes;
					wdst += func_bytes;
					nbytes -= func_bytes;
				} while (nbytes >= func_bytes);

				if (nbytes < bsize)
					goto done;
			}
		}

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}

int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}

static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
					      struct blkcipher_desc *desc,
					      struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		fn(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	/* the last ciphertext block becomes the IV for the next chunk */
	*(u128 *)walk->iv = *iv;
	return nbytes;
}

int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

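/*
 * CBC decryption walks the chunk backwards from the last block so that
 * ciphertext blocks can be decrypted in parallel before being XORed
 * with the preceding ciphertext; the saved last ciphertext block then
 * becomes the IV for the next chunk.
 */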
static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc,
			  struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 last_iv;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		/* Process multi-block batch */
		if (nbytes >= func_bytes) {
			do {
				nbytes -= func_bytes - bsize;
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= bsize;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, src - 1);
				src -= 1;
				dst -= 1;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}

static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}

static inline void u128_inc(u128 *i)
{
	i->b++;
	if (!i->b)
		i->a++;
}

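/*
 * CTR tail handling: a final partial block is run through a whole-block
 * temporary so the single-block CTR routine can be reused; only the
 * remaining nbytes of the result are copied back.
 */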
static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
					struct blkcipher_desc *desc,
					struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *src = (u8 *)walk->src.virt.addr;
	u8 *dst = (u8 *)walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	u128 ctrblk;
	u128 tmp;

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	memcpy(&tmp, src, nbytes);
	fn_ctr(ctx, &tmp, &tmp, &ctrblk);
	memcpy(dst, &tmp, nbytes);

	u128_to_be128((be128 *)walk->iv, &ctrblk);
}

static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

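/*
 * The walk below advances in whole blocks, so a partial block can only
 * remain at the very end; it is finished by glue_ctr_crypt_final_128bit()
 * using the cipher's single-block CTR routine.
 */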
int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while ((nbytes = walk.nbytes) >= bsize) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		glue_ctr_crypt_final_128bit(
			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}

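/*
 * CBC decrypt helper for the multi-block routine: the ciphertext blocks
 * are saved up front because decryption may run in place; afterwards each
 * block but the first (which the generic glue code chains) is XORed with
 * the preceding ciphertext block.
 */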
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
	unsigned int j;

	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
		ivs[j] = src[j];

	serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}

static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
{
	be128 ctrblk;

	u128_to_be128(&ctrblk, iv);
	u128_inc(iv);

	__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
	u128_xor(dst, src, (u128 *)&ctrblk);
}

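/*
 * Multi-block CTR: materialize SERPENT_PARALLEL_BLOCKS big-endian counter
 * blocks, then let the xor-variant of the xway routine encrypt them and
 * XOR the keystream over dst in one pass.
 */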
static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
				   u128 *iv)
{
	be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
	unsigned int i;

	for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
		if (dst != src)
			dst[i] = src[i];

		u128_to_be128(&ctrblks[i], iv);
		u128_inc(iv);
	}

	serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
}

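/*
 * Dispatch tables: the SERPENT_PARALLEL_BLOCKS-wide SSE2 routine comes
 * first and the single-block C implementation last, as struct
 * common_glue_ctx requires.
 */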
static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
	} }
};

static const struct common_glue_ctx serpent_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
	} }
};

static const struct common_glue_ctx serpent_dec = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
	} }
};

static const struct common_glue_ctx serpent_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
	} }
};

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
				       dst, src, nbytes);
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
				       nbytes);
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
}

static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}

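/*
 * Per-request state handed to the LRW/XTS callbacks below; fpu_enabled is
 * carried across callback invocations so the FPU is claimed at most once
 * per request and released by serpent_fpu_end() when the request is done.
 */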
struct crypt_priv {
	struct serpent_ctx *ctx;
	bool fpu_enabled;
};

static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
}

struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct serpent_ctx serpent_ctx;
};

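/*
 * For lrw(serpent) the last SERPENT_BLOCK_SIZE bytes of the key material
 * are the tweak key; the remainder keys the cipher itself.
 */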
static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
							SERPENT_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen -
						SERPENT_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

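/* xts(serpent) uses independent keys for the tweak and the data cipher. */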
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;
	struct serpent_ctx crypt_ctx;
};

static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* the key consists of two keys of equal size concatenated,
	 * therefore the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

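/*
 * The "__"-prefixed blkciphers (priority 0) are the synchronous SSE2
 * workers. The exported ablkcipher wrappers (priority 400) go through the
 * ablk helper, which calls a worker directly when the FPU is usable in
 * the current context and defers to cryptd otherwise.
 */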
static struct crypto_alg serpent_algs[10] = { {
	.cra_name		= "__ecb-serpent-sse2",
	.cra_driver_name	= "__driver-ecb-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[0].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-serpent-sse2",
	.cra_driver_name	= "__driver-cbc-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[1].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-serpent-sse2",
	.cra_driver_name	= "__driver-ctr-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[2].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-serpent-sse2",
	.cra_driver_name	= "__driver-lrw-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[3].cra_list),
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= lrw_serpent_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-serpent-sse2",
	.cra_driver_name	= "__driver-xts-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[4].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= xts_serpent_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(serpent)",
	.cra_driver_name	= "ecb-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[5].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(serpent)",
	.cra_driver_name	= "cbc-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[6].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(serpent)",
	.cra_driver_name	= "ctr-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[7].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	.cra_name		= "lrw(serpent)",
	.cra_driver_name	= "lrw-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[8].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(serpent)",
	.cra_driver_name	= "xts-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[9].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };

static int __init serpent_sse2_init(void)
{
	if (!cpu_has_xmm2) {
		printk(KERN_INFO "SSE2 instructions are not detected.\n");
		return -ENODEV;
	}

	return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

static void __exit serpent_sse2_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");