/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/scatterlist.h>
#include <asm/page.h>

#include "dm.h"

#define PFX	"crypt: "

/*
 * per bio private data
 */
struct crypt_io {
	struct dm_target *target;
	struct bio *bio;
	struct bio *first_clone;
	struct work_struct work;
	atomic_t pending;
	int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	void *iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	struct crypto_tfm *tfm;
	unsigned int key_size;
	u8 key[0];
};

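/*
 * Note: key[0] is a flexible array member in pre-C99 style; crypt_ctr()
 * allocates sizeof(struct crypt_config) + key_size bytes in a single
 * kmalloc() so the decoded key is stored directly behind the struct.
 */
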
#define MIN_IOS        256
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static kmem_cache_t *_crypt_io_pool;

/*
 * Mempool alloc and free functions for the page
 */
static void *mempool_alloc_page(unsigned int __nocast gfp_mask, void *data)
{
	return alloc_page(gfp_mask);
}

static void mempool_free_page(void *page, void *data)
{
	__free_page(page);
}


/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * plumb: unimplemented, see:
 *        http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

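/*
 * Worked example for the plain generator above: with a 16-byte IV and
 * sector number 0x11223344, the generated IV is 44 33 22 11 00 ... 00,
 * i.e. the low 32 bits of the sector number, little-endian, zero-padded
 * to iv_size. (Sector value chosen purely for illustration.)
 */
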
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_tfm *essiv_tfm;
	struct crypto_tfm *hash_tfm;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;

	if (opts == NULL) {
		ti->error = PFX "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_tfm(opts, 0);
	if (hash_tfm == NULL) {
		ti->error = PFX "Error initializing ESSIV hash";
		return -EINVAL;
	}

	if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) {
		ti->error = PFX "Expected digest algorithm for ESSIV hash";
		crypto_free_tfm(hash_tfm);
		return -EINVAL;
	}

	saltsize = crypto_tfm_alg_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = PFX "Error kmallocing salt storage in ESSIV";
		crypto_free_tfm(hash_tfm);
		return -ENOMEM;
	}

	sg.page = virt_to_page(cc->key);
	sg.offset = offset_in_page(cc->key);
	sg.length = cc->key_size;
	crypto_digest_digest(hash_tfm, &sg, 1, salt);
	crypto_free_tfm(hash_tfm);

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm),
				     CRYPTO_TFM_MODE_ECB);
	if (essiv_tfm == NULL) {
		ti->error = PFX "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return -EINVAL;
	}
	if (crypto_tfm_alg_blocksize(essiv_tfm)
	    != crypto_tfm_alg_ivsize(cc->tfm)) {
		ti->error = PFX "Block size of ESSIV cipher does "
				"not match IV size of block cipher";
		crypto_free_tfm(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) {
		ti->error = PFX "Failed to set key for ESSIV cipher";
		crypto_free_tfm(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	kfree(salt);

	cc->iv_gen_private = (void *)essiv_tfm;
	return 0;
}

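/*
 * In short, the constructor above computes salt = H(key) once and keys
 * a second, ECB-mode instance of the bulk cipher with it; the generator
 * crypt_iv_essiv_gen() below then produces
 *
 *	IV(sector) = E_salt(le64(sector), zero-padded to iv_size)
 *
 * For example, with aes-cbc-essiv:sha256 the 32-byte sha256 digest of
 * the key ends up keying AES-256 in ECB mode.
 */
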
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private);
	cc->iv_gen_private = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	struct scatterlist sg = { NULL, };

	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);

	sg.page = virt_to_page(iv);
	sg.offset = offset_in_page(iv);
	sg.length = cc->iv_size;
	crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private,
			      &sg, &sg, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};


static inline int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
			  struct scatterlist *in, unsigned int length,
			  int write, sector_t sector)
{
	u8 iv[cc->iv_size];
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
		else
			r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
	} else {
		if (write)
			r = crypto_cipher_encrypt(cc->tfm, out, in, length);
		else
			r = crypto_cipher_decrypt(cc->tfm, out, in, length);
	}

	return r;
}

static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
		   struct bio *bio_out, struct bio *bio_in,
		   sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
					      ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}

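/*
 * Note that each pass of the loop above converts exactly one 512-byte
 * sector (1 << SECTOR_SHIFT) with its own IV, so e.g. a single 4 KiB
 * bio_vec page takes eight independent cipher calls; this per-sector
 * granularity is what allows random access on the encrypted device.
 */
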
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *
crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
		   struct bio *base_bio, unsigned int *bio_vec_idx)
{
	struct bio *bio;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;

	/*
	 * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
	 * to fail earlier. This is not necessary but increases throughput.
	 * FIXME: Is this really intelligent?
	 */
	if (base_bio)
		bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
	else
		bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
	if (!bio)
		return NULL;

	/* if the last bio was not complete, continue where that one ended */
	bio->bi_idx = *bio_vec_idx;
	bio->bi_vcnt = *bio_vec_idx;
	bio->bi_size = 0;
	bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	/* bio->bi_idx pages have already been allocated */
	size -= bio->bi_idx * PAGE_SIZE;

	for(i = bio->bi_idx; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(bio, i);

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

		bio->bi_size += bv->bv_len;
		bio->bi_vcnt++;
		size -= bv->bv_len;
	}

	if (!bio->bi_size) {
		bio_put(bio);
		return NULL;
	}

	/*
	 * Remember the last bio_vec allocated to be able
	 * to correctly continue after the splitting.
	 */
	*bio_vec_idx = bio->bi_vcnt;

	return bio;
}

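/*
 * Allocation strategy in short: the first MIN_BIO_PAGES pages use a
 * waiting gfp_mask (a waiting mempool_alloc() does not fail), then
 * __GFP_WAIT is dropped so further pages are taken only if cheaply
 * available; the partially filled bio is returned and crypt_map()
 * submits it while looping to cover the remainder.
 */
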
static void crypt_free_buffer_pages(struct crypt_config *cc,
				    struct bio *bio, unsigned int bytes)
{
	unsigned int i, start, end;
	struct bio_vec *bv;

	/*
	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
	 * endio function is too dangerous at the moment, so I calculate the
	 * correct position using bi_vcnt and bi_size.
	 * The bv_offset and bv_len fields might already be modified but we
	 * know that we always allocated whole pages.
	 * A fix to the bi_idx issue in the kernel is in the works, so
	 * we will hopefully be able to revert to the cleaner solution soon.
	 */
	i = bio->bi_vcnt - 1;
	bv = bio_iovec_idx(bio, i);
	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size;
	start = end - bytes;

	start >>= PAGE_SHIFT;
	if (!bio->bi_size)
		end = bio->bi_vcnt;
	else
		end >>= PAGE_SHIFT;

	for(i = start; i < end; i++) {
		bv = bio_iovec_idx(bio, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	if (io->first_clone)
		bio_put(io->first_clone);

	bio_endio(io->bio, io->bio->bi_size, io->error);

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context, so bios returning from read requests get
 * queued here.
 */
static struct workqueue_struct *_kcryptd_workqueue;

static void kcryptd_do_work(void *data)
{
	struct crypt_io *io = (struct crypt_io *) data;
	struct crypt_config *cc = (struct crypt_config *) io->target->private;
	struct convert_context ctx;
	int r;

	crypt_convert_init(cc, &ctx, io->bio, io->bio,
			   io->bio->bi_sector - io->target->begin, 0);
	r = crypt_convert(cc, &ctx);

	dec_pending(io, r);
}

static void kcryptd_queue_io(struct crypt_io *io)
{
	INIT_WORK(&io->work, kcryptd_do_work, io);
	queue_work(_kcryptd_workqueue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for(i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for(i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

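/*
 * Example: the 32-character string "00112233445566778899aabbccddeeff"
 * decodes to a 16-byte (128-bit) key. crypt_ctr() derives key_size as
 * strlen(argv[1]) / 2, so the string must have even length and consist
 * of hex digits only, otherwise crypt_decode_key() returns -EINVAL.
 */
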
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
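/*
 * An illustrative table line (device and key chosen for the example,
 * key shortened):
 *
 *	0 409600 crypt aes-cbc-essiv:sha256 00112233...ddeeff 0 /dev/sdb1 0
 *
 * maps 409600 sectors onto /dev/sdb1 starting at its sector 0, using
 * AES in CBC mode with ESSIV(sha256) IV generation and no IV offset.
 * Passing "-" as the key selects an empty (zero-length) key.
 */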
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_tfm *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int crypto_flags;
	unsigned int key_size;

	if (argc != 5) {
		ti->error = PFX "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN(PFX "Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			PFX "Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	cc->key_size = key_size;
	if ((!key_size && strcmp(argv[1], "-") != 0) ||
	    (key_size && crypt_decode_key(cc->key, argv[1], key_size) < 0)) {
		ti->error = PFX "Error decoding key";
		goto bad1;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	/* Choose crypto_flags according to chainmode */
	if (strcmp(chainmode, "cbc") == 0)
		crypto_flags = CRYPTO_TFM_MODE_CBC;
	else if (strcmp(chainmode, "ecb") == 0)
		crypto_flags = CRYPTO_TFM_MODE_ECB;
	else {
		ti->error = PFX "Unknown chaining mode";
		goto bad1;
	}

	if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) {
		ti->error = PFX "This chaining mode requires an IV mechanism";
		goto bad1;
	}

	tfm = crypto_alloc_tfm(cipher, crypto_flags);
	if (!tfm) {
		ti->error = PFX "Error allocating crypto tfm";
		goto bad1;
	}
	if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
		ti->error = PFX "Expected cipher algorithm";
		goto bad2;
	}

	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain" and "essiv:<esshash>";
	 * see the comment block above the IV generators for details.
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else {
		ti->error = PFX "Invalid IV mode";
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

	if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(crypto_tfm_alg_ivsize(tfm),
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		cc->iv_size = 0;
		if (cc->iv_gen_ops) {
			DMWARN(PFX "Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = PFX "Cannot allocate crypt io mempool";
		goto bad3;
	}

	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
				       mempool_free_page, NULL);
	if (!cc->page_pool) {
		ti->error = PFX "Cannot allocate page mempool";
		goto bad4;
	}

	if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = PFX "Error setting key";
		goto bad5;
	}

	if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) {
		ti->error = PFX "Invalid iv_offset sector";
		goto bad5;
	}

	if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) {
		ti->error = PFX "Invalid device sector";
		goto bad5;
	}

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = PFX "Device lookup failed";
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = PFX "Error kmallocing iv_mode string";
			goto bad5;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	ti->private = cc;
	return 0;

bad5:
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
	crypto_free_tfm(tfm);
bad1:
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_tfm(cc->tfm);
	dm_put_device(ti, cc->dev);
	kfree(cc);
}

static int crypt_endio(struct bio *bio, unsigned int done, int error)
{
	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (bio_data_dir(bio) == WRITE) {
		/*
		 * free the processed pages, even if
		 * it's only a partially completed write
		 */
		crypt_free_buffer_pages(cc, bio, done);
	}

	if (bio->bi_size)
		return 1;

	bio_put(bio);

	/*
	 * successful reads are decrypted by the worker thread
	 */
	if ((bio_data_dir(bio) == READ)
	    && bio_flagged(bio, BIO_UPTODATE)) {
		kcryptd_queue_io(io);
		return 0;
	}

	dec_pending(io, error);
	return error;
}

static inline struct bio *
crypt_clone(struct crypt_config *cc, struct crypt_io *io, struct bio *bio,
	    sector_t sector, unsigned int *bvec_idx,
	    struct convert_context *ctx)
{
	struct bio *clone;

	if (bio_data_dir(bio) == WRITE) {
		clone = crypt_alloc_buffer(cc, bio->bi_size,
					   io->first_clone, bvec_idx);
		if (clone) {
			ctx->bio_out = clone;
			if (crypt_convert(cc, ctx) < 0) {
				crypt_free_buffer_pages(cc, clone,
							clone->bi_size);
				bio_put(clone);
				return NULL;
			}
		}
	} else {
		/*
		 * The block layer might modify the bvec array, so always
		 * copy the required bvecs because we need the original
		 * one in order to decrypt the whole bio data *afterwards*.
		 */
		clone = bio_alloc(GFP_NOIO, bio_segments(bio));
		if (clone) {
			clone->bi_idx = 0;
			clone->bi_vcnt = bio_segments(bio);
			clone->bi_size = bio->bi_size;
			memcpy(clone->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * clone->bi_vcnt);
		}
	}

	if (!clone)
		return NULL;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_sector = cc->start + sector;
	clone->bi_rw = bio->bi_rw;

	return clone;
}

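/*
 * To summarize the two cases above: a write clone carries freshly
 * allocated pages that already hold the ciphertext, so the original
 * bio's pages are never touched; a read clone shares the original
 * pages (only the bvec array is copied) and is decrypted in place by
 * kcryptd after the read completes.
 */
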
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO);
	struct convert_context ctx;
	struct bio *clone;
	unsigned int remaining = bio->bi_size;
	sector_t sector = bio->bi_sector - ti->begin;
	unsigned int bvec_idx = 0;

	io->target = ti;
	io->bio = bio;
	io->first_clone = NULL;
	io->error = 0;
	atomic_set(&io->pending, 1); /* hold a reference */

	if (bio_data_dir(bio) == WRITE)
		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_clone(cc, io, bio, sector, &bvec_idx, &ctx);
		if (!clone)
			goto cleanup;

		if (!io->first_clone) {
			/*
			 * hold a reference to the first clone, because it
			 * holds the bio_vec array and that can't be freed
			 * before all other clones are released
			 */
			bio_get(clone);
			io->first_clone = clone;
		}
		atomic_inc(&io->pending);

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		generic_make_request(clone);

		/* out of memory -> run queues */
		if (remaining)
			blk_congestion_wait(bio_data_dir(clone), HZ/100);
	}

	/* drop reference, clones could have returned before we reach this */
	dec_pending(io, 0);
	return 0;

cleanup:
	if (io->first_clone) {
		dec_pending(io, -ENOMEM);
		return 0;
	}

	/* if no bio has been dispatched yet, we can directly return the error */
	mempool_free(io, cc->io_pool);
	return -ENOMEM;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	const char *cipher;
	const char *chainmode = NULL;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		cipher = crypto_tfm_alg_name(cc->tfm);

		switch(cc->tfm->crt_cipher.cit_mode) {
		case CRYPTO_TFM_MODE_CBC:
			chainmode = "cbc";
			break;
		case CRYPTO_TFM_MODE_ECB:
			chainmode = "ecb";
			break;
		default:
			BUG();
		}

		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
		else
			DMEMIT("%s-%s ", cipher, chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" " SECTOR_FORMAT " %s " SECTOR_FORMAT,
		       cc->iv_offset, cc->dev->name, cc->start);
		break;
	}
	return 0;
}

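/*
 * For instance, a target built from the illustrative table line above
 * would report its STATUSTYPE_TABLE parameters roughly as
 *
 *	aes-cbc-essiv:sha256 <hex key> 0 <device> 0
 *
 * mirroring the constructor arguments so the table can be reloaded.
 */
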
static struct target_type crypt_target = {
	.name    = "crypt",
	.version = {1, 1, 0},
	.module  = THIS_MODULE,
	.ctr     = crypt_ctr,
	.dtr     = crypt_dtr,
	.map     = crypt_map,
	.status  = crypt_status,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
					   sizeof(struct crypt_io),
					   0, 0, NULL, NULL);
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR(PFX "couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR(PFX "register failed %d", r);
		goto bad2;
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR(PFX "unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");