/*
 * echainiv: Encrypted Chain IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text.  This algorithm requires that the block size be equal
 * to the IV size.  It is mainly useful for CBC.
 *
 * This generator can only be used by algorithms where authentication
 * is performed after encryption (i.e., authenc).
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
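/*
 * Conceptually, per encryption request (a sketch, not literal code):
 *
 *	iv_out  = iv_in ^ salt;		// stored alongside the ciphertext
 *	iv_used = per_cpu_chain_value;	// actual IV fed to the cipher
 *	encrypt(..., iv_used);
 *	per_cpu_chain_value = iv_used;	// as updated by the cipher
 *
 * Instantiated as a template around an existing AEAD, e.g.
 * "echainiv(authenc(hmac(sha1),cbc(aes)))" (example name only).
 */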

#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define MAX_IV_SIZE 16

struct echainiv_request_ctx {
	struct scatterlist src[2];
	struct scatterlist dst[2];
	struct scatterlist ivbuf[2];
	struct scatterlist *ivsg;
	struct aead_givcrypt_request subreq;
};

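/*
 * Per-transform state.  ->child is the underlying AEAD, ->null is the
 * default null cipher used to copy src to dst, and ->salt is a flexible
 * array of ivsize random bytes generated on the first encryption.
 */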
struct echainiv_ctx {
	struct crypto_aead *child;
	spinlock_t lock;
	struct crypto_blkcipher *null;
	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);

static int echainiv_setkey(struct crypto_aead *tfm,
			   const u8 *key, unsigned int keylen)
{
	struct echainiv_ctx *ctx = crypto_aead_ctx(tfm);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int echainiv_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct echainiv_ctx *ctx = crypto_aead_ctx(tfm);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
	u32 *a = (u32 *)dst;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		*a++ = this_cpu_read(*b);
		b++;
	}
}

static void echainiv_write_iv(const u8 *src, unsigned size)
{
	const u32 *a = (const u32 *)src;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		this_cpu_write(*b, *a);
		a++;
		b++;
	}
}

static void echainiv_encrypt_compat_complete2(struct aead_request *req,
					      int err)
{
	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
	struct aead_givcrypt_request *subreq = &rctx->subreq;
	struct crypto_aead *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0,
				 crypto_aead_ivsize(geniv), 1);

out:
	kzfree(subreq->giv);
}

static void echainiv_encrypt_compat_complete(
	struct crypto_async_request *base, int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_compat_complete2(req, err);
	aead_request_complete(req, err);
}

static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;
	unsigned int ivsize;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	ivsize = crypto_aead_ivsize(geniv);

	echainiv_write_iv(subreq->iv, ivsize);

	if (req->iv != subreq->iv)
		memcpy(req->iv, subreq->iv, ivsize);

out:
	if (req->iv != subreq->iv)
		kzfree(subreq->iv);
}

static void echainiv_encrypt_complete(struct crypto_async_request *base,
				      int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_complete2(req, err);
	aead_request_complete(req, err);
}

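/*
 * Compatibility path for old-style (givcrypt) AEAD implementations.
 * The generated IV normally lands directly in the destination SG list;
 * if that page may be in highmem it is staged in a kmalloc'd buffer
 * and copied into place from the completion handler instead.
 */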
static int echainiv_encrypt_compat(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
	struct aead_givcrypt_request *subreq = &rctx->subreq;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	__be64 seq;
	int err;

	compl = req->base.complete;
	data = req->base.data;

	rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen);
	info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg);

	if (!info) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		compl = echainiv_encrypt_compat_complete;
		data = req;
	}

	memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq));

	aead_givcrypt_set_tfm(subreq, ctx->child);
	aead_givcrypt_set_callback(subreq, req->base.flags, compl, data);
	aead_givcrypt_set_crypt(subreq,
				scatterwalk_ffwd(rctx->src, req->src,
						 req->assoclen + ivsize),
				scatterwalk_ffwd(rctx->dst, rctx->ivsg,
						 ivsize),
				req->cryptlen - ivsize, req->iv);
	aead_givcrypt_set_assoc(subreq, req->src, req->assoclen);
	aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq));

	err = crypto_aead_givencrypt(subreq);
	if (unlikely(PageHighMem(sg_page(rctx->ivsg))))
		echainiv_encrypt_compat_complete2(req, err);
	return err;
}

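/*
 * New-style encryption.  Roughly:
 *
 *   1. If operating out of place, copy the plaintext from src to dst
 *      with the null cipher, then work in place on dst.
 *   2. If req->iv is not aligned for the child algorithm, bounce it
 *      through a kmalloc'd buffer (freed in the completion handler).
 *   3. XOR the caller's IV with the salt and store the result in the
 *      IV slot of dst, covered by the inner request's AD.
 *   4. Use the per-CPU chain value as the inner IV; the completion
 *      handler writes the updated value back for the next request.
 */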
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize;
	int err;

	aead_request_set_tfm(subreq, ctx->child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	ivsize = crypto_aead_ivsize(geniv);

	if (req->src != req->dst) {
		struct scatterlist src[2];
		struct scatterlist dst[2];
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(
			&desc,
			scatterwalk_ffwd(dst, req->dst,
					 req->assoclen + ivsize),
			scatterwalk_ffwd(src, req->src,
					 req->assoclen + ivsize),
			req->cryptlen - ivsize);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize, 0);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

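/*
 * Decryption in compat mode: copy the transmitted IV out of the source
 * buffer into req->iv and hand the rest to the old-style child, with
 * the IV accounted for in the AD parameters of the inner request.
 */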
static int echainiv_decrypt_compat(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	ivsize = crypto_aead_ivsize(geniv);

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen, ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}

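/*
 * New-style decryption: recover the transmitted IV from the source
 * buffer into req->iv, copy it across to dst when operating out of
 * place, and treat it as part of the AD of the inner request.
 */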
static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	ivsize = crypto_aead_ivsize(geniv);

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize, 0);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
	if (req->src != req->dst)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->assoclen, ivsize, 1);

	return crypto_aead_decrypt(subreq);
}

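/*
 * First-use hooks: the salt is generated lazily from the default RNG
 * on the first encryption, under ctx->lock so that concurrent callers
 * seed it exactly once, after which ->encrypt is repointed at the
 * normal handler.
 */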
static int echainiv_encrypt_compat_first(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (geniv->encrypt != echainiv_encrypt_compat_first)
		goto unlock;

	geniv->encrypt = echainiv_encrypt_compat;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return echainiv_encrypt_compat(req);
}

static int echainiv_encrypt_first(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (geniv->encrypt != echainiv_encrypt_first)
		goto unlock;

	geniv->encrypt = echainiv_encrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return echainiv_encrypt(req);
}

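/*
 * aead_geniv_init() sets up geniv->child; the init routines below steal
 * it into our own context and point geniv->child back at the geniv
 * itself, a quirk of the transitional geniv infrastructure this code
 * is built on.
 */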
static int echainiv_compat_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct echainiv_request_ctx));

	err = aead_geniv_init(tfm);

	ctx->child = geniv->child;
	geniv->child = geniv;

	return err;
}

static int echainiv_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

	ctx->null = crypto_get_default_null_skcipher();
	err = PTR_ERR(ctx->null);
	if (IS_ERR(ctx->null))
		goto out;

	err = aead_geniv_init(tfm);
	if (err)
		goto drop_null;

	ctx->child = geniv->child;
	geniv->child = geniv;

out:
	return err;

drop_null:
	crypto_put_default_null_skcipher();
	goto out;
}

static void echainiv_compat_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void echainiv_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
	crypto_put_default_null_skcipher();
}

static struct crypto_template echainiv_tmpl;

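/*
 * Build an instance around the underlying AEAD.  The IV size must be
 * at least 8 bytes (to hold the 64-bit sequence number), a multiple of
 * 4 (for the u32-based per-CPU chaining), and no more than MAX_IV_SIZE.
 * Old-style givcrypt algorithms get the compat entry points.
 */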
static struct crypto_instance *echainiv_aead_alloc(struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;

	inst = aead_geniv_alloc(&echainiv_tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		goto out;

	if (inst->alg.ivsize < sizeof(u64) ||
	    inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE) {
		aead_geniv_free(inst);
		inst = ERR_PTR(-EINVAL);
		goto out;
	}

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	inst->alg.setkey = echainiv_setkey;
	inst->alg.setauthsize = echainiv_setauthsize;
	inst->alg.encrypt = echainiv_encrypt_first;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.base.cra_init = echainiv_init;
	inst->alg.base.cra_exit = echainiv_exit;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;

	if (alg->base.cra_aead.encrypt) {
		inst->alg.encrypt = echainiv_encrypt_compat_first;
		inst->alg.decrypt = echainiv_decrypt_compat;

		inst->alg.base.cra_init = echainiv_compat_init;
		inst->alg.base.cra_exit = echainiv_compat_exit;
	}

out:
	return aead_crypto_instance(inst);
}

static struct crypto_instance *echainiv_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	int err;

	err = crypto_get_default_rng();
	if (err)
		return ERR_PTR(err);

	inst = echainiv_aead_alloc(tb);

	if (IS_ERR(inst))
		goto put_rng;

out:
	return inst;

put_rng:
	crypto_put_default_rng();
	goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
	crypto_put_default_rng();
}

static struct crypto_template echainiv_tmpl = {
	.name = "echainiv",
	.alloc = echainiv_alloc,
	.free = echainiv_free,
	.module = THIS_MODULE,
};

static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}

static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}

module_init(echainiv_module_init);
module_exit(echainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");