/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */
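
/* For reference, the construction implemented below is (roughly):
 *
 *   T = Key2 (x) I                   (multiplication in GF(2^128), bbe)
 *   C = E(Key1, P xor T) xor T
 *
 * where Key1 keys the underlying single-block cipher, Key2 is the 16-byte
 * tweak key (the last 16 bytes of the key material passed to setkey) and I
 * is the 128-bit big-endian block index carried in the IV.
 */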

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

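/* per-instance context of the "lrw(cipher)" template: the keyed single-block
 * cipher (Key1) and the precomputed multiplication tables for Key2 */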
struct priv {
        struct crypto_cipher *child;
        struct lrw_table_ctx table;
};

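/* set bit number @bit of the 128-bit big-endian integer at @b, counting from
 * the least significant bit; the xor below remaps the index so that
 * __set_bit() touches the right word and bit on either endianness */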
static inline void setbit128_bbe(void *b, int bit)
{
        __set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
                         BITS_PER_LONG
#else
                         BITS_PER_BYTE
#endif
                        ), b);
}

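/*
 * lrw_init_table - precompute the Key2 multiplication tables
 * @ctx: table context to (re)initialize
 * @tweak: the 16-byte tweak key (Key2)
 *
 * Builds the 64k gf128mul lookup table used to compute T = Key2 (x) I and
 * the 128 precomputed values used by the incremental-tweak optimization.
 * Any previously allocated table is freed first, so this may be called
 * again on rekey.
 */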
int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
        be128 tmp = { 0 };
        int i;

        if (ctx->table)
                gf128mul_free_64k(ctx->table);

        /* initialize multiplication table for Key2 */
        ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
        if (!ctx->table)
                return -ENOMEM;

        /* initialize optimization table: mulinc[i] = (the 128-bit value with
         * bits 0..i set) * Key2, used to update T incrementally as the block
         * index is incremented */
        for (i = 0; i < 128; i++) {
                setbit128_bbe(&tmp, i);
                ctx->mulinc[i] = tmp;
                gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);

void lrw_free_table(struct lrw_table_ctx *ctx)
{
        if (ctx->table)
                gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

static int setkey(struct crypto_tfm *parent, const u8 *key,
                  unsigned int keylen)
{
        struct priv *ctx = crypto_tfm_ctx(parent);
        struct crypto_cipher *child = ctx->child;
        int err, bsize = LRW_BLOCK_SIZE;
        const u8 *tweak = key + keylen - bsize;

        crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
                                       CRYPTO_TFM_REQ_MASK);
        err = crypto_cipher_setkey(child, key, keylen - bsize);
        if (err)
                return err;
        crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
                                     CRYPTO_TFM_RES_MASK);

        return lrw_init_table(&ctx->table, tweak);
}
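
/* The key material passed to setkey() is Key1 followed by Key2, i.e. the
 * underlying cipher key with the 16-byte tweak key appended.  A rough sketch
 * of how a kernel caller might drive the resulting "lrw(aes)" blkcipher
 * (error handling omitted, AES-256 assumed, so 32 + 16 = 48 key bytes):
 *
 *	struct crypto_blkcipher *tfm = crypto_alloc_blkcipher("lrw(aes)", 0, 0);
 *	struct blkcipher_desc desc = { .tfm = tfm };
 *	struct scatterlist sg;
 *	u8 key[48];	// 32 bytes of Key1 followed by 16 bytes of Key2
 *	u8 iv[16];	// big-endian index of the first block
 *	u8 buf[64];	// data to encrypt in place, a multiple of 16 bytes
 *
 *	crypto_blkcipher_setkey(tfm, key, sizeof(key));
 *	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
 *	crypto_free_blkcipher(tfm);
 */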

struct sinfo {
        be128 t;
        struct crypto_tfm *tfm;
        void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
};

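/* increment the 128-bit big-endian block index I held in the IV
 * (iv->b is the low 64 bits, iv->a the high 64 bits) */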
static inline void inc(be128 *iv)
{
        be64_add_cpu(&iv->b, 1);
        if (!iv->b)
                be64_add_cpu(&iv->a, 1);
}

static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
{
        be128_xor(dst, &s->t, src);		/* PP <- T xor P */
        s->fn(s->tfm, dst, dst);		/* CC <- E(Key1,PP) */
        be128_xor(dst, dst, &s->t);		/* C <- T xor CC */
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
        int x;
        __be32 *p = (__be32 *) block;

        for (p += 3, x = 0; x < 128; p--, x += 32) {
                u32 val = be32_to_cpup(p);

                if (!~val)
                        continue;

                return x + ffz(val);
        }

        /*
         * If we get here, then x == 128 and we are incrementing the counter
         * from all ones to all zeros. This means we must return index 127, i.e.
         * the one corresponding to key2*{ 1,...,1 }.
         */
        return 127;
}
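
/* The "optimization discussed in the specification" used below: when the
 * index I has k trailing 1 bits, I xor (I + 1) is the value with bits 0..k
 * set, so the next tweak can be derived without a full multiplication:
 *
 *   T(I + 1) = (I + 1) * Key2 = T(I) xor mulinc[k]
 *
 * e.g. stepping from I = ...0111 to I + 1 = ...1000 flips bits 0..3, so the
 * new T is the old T xor mulinc[3] (get_index128() returns 3 for ...0111).
 */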

static int crypt(struct blkcipher_desc *d,
                 struct blkcipher_walk *w, struct priv *ctx,
                 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
        int err;
        unsigned int avail;
        const int bs = LRW_BLOCK_SIZE;
        struct sinfo s = {
                .tfm = crypto_cipher_tfm(ctx->child),
                .fn = fn
        };
        be128 *iv;
        u8 *wsrc;
        u8 *wdst;

        err = blkcipher_walk_virt(d, w);
        if (!(avail = w->nbytes))
                return err;

        wsrc = w->src.virt.addr;
        wdst = w->dst.virt.addr;

        /* calculate first value of T */
        iv = (be128 *)w->iv;
        s.t = *iv;

        /* T <- I*Key2 */
        gf128mul_64k_bbe(&s.t, ctx->table.table);

        goto first;

        for (;;) {
                do {
                        /* T <- I*Key2, using the optimization
                         * discussed in the specification */
                        be128_xor(&s.t, &s.t,
                                  &ctx->table.mulinc[get_index128(iv)]);
                        inc(iv);

first:
                        lrw_round(&s, wdst, wsrc);

                        wsrc += bs;
                        wdst += bs;
                } while ((avail -= bs) >= bs);

                err = blkcipher_walk_done(d, w, avail);
                if (!(avail = w->nbytes))
                        break;

                wsrc = w->src.virt.addr;
                wdst = w->dst.virt.addr;
        }

        return err;
}

static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                   struct scatterlist *src, unsigned int nbytes)
{
        struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk w;

        blkcipher_walk_init(&w, dst, src, nbytes);
        return crypt(desc, &w, ctx,
                     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                   struct scatterlist *src, unsigned int nbytes)
{
        struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk w;

        blkcipher_walk_init(&w, dst, src, nbytes);
        return crypt(desc, &w, ctx,
                     crypto_cipher_alg(ctx->child)->cia_decrypt);
}

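/*
 * lrw_crypt - LRW helper for drivers that supply their own multi-block
 * single-key primitive (typically SIMD glue code)
 * @desc: blkcipher descriptor used for the scatterwalk
 * @sdst: destination scatterlist
 * @ssrc: source scatterlist
 * @nbytes: number of bytes to process (a multiple of LRW_BLOCK_SIZE)
 * @req: per-call state: the lrw_table_ctx prepared by lrw_init_table(), a
 *       tweak buffer tbuf of tbuflen bytes, and crypt_fn/crypt_ctx, where
 *       crypt_fn(crypt_ctx, blks, n) must apply the Key1 block cipher to
 *       n bytes of 16-byte blocks in place
 *
 * Up to tbuflen / 16 blocks are handled per crypt_fn call: the tweaks are
 * precomputed into tbuf, xored into the destination, crypt_fn transforms the
 * whole run, and the same tweaks are xored in again.
 */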
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
              struct scatterlist *ssrc, unsigned int nbytes,
              struct lrw_crypt_req *req)
{
        const unsigned int bsize = LRW_BLOCK_SIZE;
        const unsigned int max_blks = req->tbuflen / bsize;
        struct lrw_table_ctx *ctx = req->table_ctx;
        struct blkcipher_walk walk;
        unsigned int nblocks;
        be128 *iv, *src, *dst, *t;
        be128 *t_buf = req->tbuf;
        int err, i;

        BUG_ON(max_blks < 1);

        blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

        err = blkcipher_walk_virt(desc, &walk);
        nbytes = walk.nbytes;
        if (!nbytes)
                return err;

        nblocks = min(walk.nbytes / bsize, max_blks);
        src = (be128 *)walk.src.virt.addr;
        dst = (be128 *)walk.dst.virt.addr;

        /* calculate first value of T */
        iv = (be128 *)walk.iv;
        t_buf[0] = *iv;

        /* T <- I*Key2 */
        gf128mul_64k_bbe(&t_buf[0], ctx->table);

        i = 0;
        goto first;

        for (;;) {
                do {
                        for (i = 0; i < nblocks; i++) {
                                /* T <- I*Key2, using the optimization
                                 * discussed in the specification */
                                be128_xor(&t_buf[i], t,
                                          &ctx->mulinc[get_index128(iv)]);
                                inc(iv);
first:
                                t = &t_buf[i];

                                /* PP <- T xor P */
                                be128_xor(dst + i, t, src + i);
                        }

                        /* CC <- E(Key1,PP) */
                        req->crypt_fn(req->crypt_ctx, (u8 *)dst,
                                      nblocks * bsize);

                        /* C <- T xor CC */
                        for (i = 0; i < nblocks; i++)
                                be128_xor(dst + i, dst + i, &t_buf[i]);

                        src += nblocks;
                        dst += nblocks;
                        nbytes -= nblocks * bsize;
                        nblocks = min(nbytes / bsize, max_blks);
                } while (nblocks > 0);

                err = blkcipher_walk_done(desc, &walk, nbytes);
                nbytes = walk.nbytes;
                if (!nbytes)
                        break;

                nblocks = min(nbytes / bsize, max_blks);
                src = (be128 *)walk.src.virt.addr;
                dst = (be128 *)walk.dst.virt.addr;
        }

        return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);

static int init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_cipher *cipher;
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_spawn *spawn = crypto_instance_ctx(inst);
        struct priv *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;

        cipher = crypto_spawn_cipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
                *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                crypto_free_cipher(cipher);
                return -EINVAL;
        }

        ctx->child = cipher;
        return 0;
}

static void exit_tfm(struct crypto_tfm *tfm)
{
        struct priv *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->table);
        crypto_free_cipher(ctx->child);
}

static struct crypto_instance *alloc(struct rtattr **tb)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
        if (err)
                return ERR_PTR(err);

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = crypto_alloc_instance("lrw", alg);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
        inst->alg.cra_priority = alg->cra_priority;
        inst->alg.cra_blocksize = alg->cra_blocksize;

        if (alg->cra_alignmask < 7)
                inst->alg.cra_alignmask = 7;
        else
                inst->alg.cra_alignmask = alg->cra_alignmask;
        inst->alg.cra_type = &crypto_blkcipher_type;

        if (!(alg->cra_blocksize % 4))
                inst->alg.cra_alignmask |= 3;
        inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
        inst->alg.cra_blkcipher.min_keysize =
                alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
        inst->alg.cra_blkcipher.max_keysize =
                alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;

        inst->alg.cra_ctxsize = sizeof(struct priv);

        inst->alg.cra_init = init_tfm;
        inst->alg.cra_exit = exit_tfm;

        inst->alg.cra_blkcipher.setkey = setkey;
        inst->alg.cra_blkcipher.encrypt = encrypt;
        inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}

static void free(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(inst);
}

static struct crypto_template crypto_tmpl = {
        .name = "lrw",
        .alloc = alloc,
        .free = free,
        .module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
        return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
        crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");