// SPDX-License-Identifier: GPL-2.0-or-later
/* LRW: as defined by Cyril Guyot in
 *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */
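/* Each 16-byte block is processed as
 *
 *	C = E_K1(P xor T) xor T,	T = K2 * I	(in GF(2^128))
 *
 * where K1 is the child cipher key, K2 is the 16-byte tweak key appended
 * to it, and I is the 128-bit big-endian block index taken from the IV
 * and incremented for each successive block.  A minimal usage sketch
 * (illustrative only, error handling omitted; assumes AES-256, so
 * keylen = 32 + 16, and src/dst scatterlists covering len bytes):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, 48);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 */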

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BLOCK_SIZE 16

struct priv {
	struct crypto_skcipher *child;

	/*
	 * Optimizes multiplying a random (non-incrementing, as at the
	 * start of a new sector) value with key2; we could also have
	 * used 4k optimization tables or no optimization at all.  In
	 * the latter case we would have to store key2 here.
	 */
	struct gf128mul_64k *table;

	/*
	 * stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
	 * needed for optimized multiplication of incrementing values
	 * with key2
	 */
	be128 mulinc[128];
};

struct rctx {
	be128 t;
	struct skcipher_request subreq;
};

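/*
 * Set the bit for x^bit (counting from the least significant end) in
 * the 128-bit big-endian ("bbe") representation used by gf128mul.
 * The XOR below maps that index onto __set_bit()'s native-endian
 * bitmap indexing.
 */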
static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

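/*
 * The LRW key is the child cipher's key (key1) followed by the 16-byte
 * tweak key (key2); the trailing LRW_BLOCK_SIZE bytes seed the gf128
 * multiplication tables below.
 */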
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}

/*
 * Returns the number of trailing '1' bits in the words of the counter,
 * which is represented by 4 32-bit words, arranged from least to most
 * significant.  At the same time, increments the counter by one.
 *
 * For example:
 *
 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
 * int i = next_index(counter);
 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
 */
static int next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);

		counter[i] = 0;
		res += 32;
	}

	/*
	 * If we get here, the counter has wrapped from all ones to all
	 * zeros.  This means we must return index 127, i.e. the one
	 * corresponding to key2*{ 1,...,1 }.
	 */
	return 127;
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the next_index() calls again.
 */
static int xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int xor_tweak_pre(struct skcipher_request *req)
{
	return xor_tweak(req, false);
}

static int xor_tweak_post(struct skcipher_request *req)
{
	return xor_tweak(req, true);
}

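/*
 * Completion callback for the child ECB request.  It may be invoked
 * from atomic (e.g. softirq) context, so CRYPTO_TFM_REQ_MAY_SLEEP is
 * dropped before the second xor_tweak() pass walks the data again.
 */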
static void crypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xor_tweak_post(req);
	}

	skcipher_request_complete(req, err);
}

static void init_crypt(struct skcipher_request *req)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen, req->iv);

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);
}

static int encrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	init_crypt(req);
	return xor_tweak_pre(req) ?:
		crypto_skcipher_encrypt(subreq) ?:
		xor_tweak_post(req);
}

static int decrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	init_crypt(req);
	return xor_tweak_pre(req) ?:
		crypto_skcipher_decrypt(subreq) ?:
		xor_tweak_post(req);
}

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct rctx));

	return 0;
}
282
Herbert Xu700cb3f2016-11-22 20:08:16 +0800283static void exit_tfm(struct crypto_skcipher *tfm)
Rik Snel64470f12006-11-26 09:43:10 +1100284{
Herbert Xu700cb3f2016-11-22 20:08:16 +0800285 struct priv *ctx = crypto_skcipher_ctx(tfm);
Jussi Kivilinna171c0202011-10-18 13:32:24 +0300286
Eric Biggers217afcc2018-02-19 23:48:25 -0800287 if (ctx->table)
288 gf128mul_free_64k(ctx->table);
Herbert Xu700cb3f2016-11-22 20:08:16 +0800289 crypto_free_skcipher(ctx->child);
Rik Snel64470f12006-11-26 09:43:10 +1100290}
291
Herbert Xu700cb3f2016-11-22 20:08:16 +0800292static void free(struct skcipher_instance *inst)
Rik Snel64470f12006-11-26 09:43:10 +1100293{
Herbert Xu700cb3f2016-11-22 20:08:16 +0800294 crypto_drop_skcipher(skcipher_instance_ctx(inst));
295 kfree(inst);
296}
297
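/*
 * Instantiate "lrw(X)".  The inner name X may be a raw cipher (e.g.
 * "aes", in which case we fall back to grabbing "ecb(aes)") or an
 * explicit "ecb(...)" skcipher; either way the resulting cra_name is
 * canonicalised back to "lrw(cipher)" below.
 */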
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn, ecb_name, 0,
					   crypto_requires_sync(algt->type,
								algt->mask));
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_drop_spawn;

		if (ecb_name[len - 1] != ')')
			goto err_drop_spawn;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(be128) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");