/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2015 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
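
/*
 * The dm_crypt_io lives in the bio's per-bio data, and the first crypto
 * request for that bio is laid out directly behind it: crypt_free_req()
 * below only returns a request to the mempool when it is not at io + 1.
 * CRYPTO_MINALIGN_ATTR keeps that embedded request correctly aligned.
 */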

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
	     DM_CRYPT_EXIT_THREAD};

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct rb_root write_tree;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};

#define MIN_IOS 16

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally an extra IV seed.
 *       This means that after decryption the first block
 *       of the sector must be tweaked according to the decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: plain aes-cbc mode
 *         version 2: uses a 64-multikey scheme with the lmk IV generator
 *         version 3: the same as version 2 with an additional IV seed
 *                    (it uses 65 keys, the last key is used as the IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the initial key and the sector number.
 *       In addition, a whitening value is applied to every sector; it is
 *       derived from the initial key and the sector number and mixed
 *       using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking
 *       attacks and should only be used to access old compatible
 *       containers.
 *
 * plumb: unimplemented, see:
 *        http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}
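
/*
 * Worked example for the two generators above: for sector 0x100000000
 * (the first sector past 2 TiB), plain truncates the IV to all zeros --
 * colliding with sector 0 -- while plain64 keeps the full little-endian
 * 64-bit value. Hence plain64 is the safe choice for large devices.
 */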

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* We need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen. */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
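
/*
 * Example: for a 16-byte block cipher, log = 4 and shift = 5, so the IV
 * for sector N is the 64-bit big-endian value (N << 5) + 1, i.e. the
 * 1-based index of the first 16-byte block in that sector.
 */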

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}
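
/*
 * In short: the IV is the raw (unpadded) MD5 state over the optional
 * seed, the sector's data blocks 1-31 and the encoded sector number, so
 * it depends on the plaintext as well as on the position.
 */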

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}
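
/*
 * In short: a 16-byte buffer (the whitening seed XORed with the sector
 * number) has each 32-bit word replaced by its CRC32 and is then folded
 * to 8 bytes, which are XORed over the whole 512-byte sector. Since this
 * is pure XOR it is its own inverse, so the same function both applies
 * and removes the whitening.
 */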

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr       = crypt_iv_tcw_ctr,
	.dtr       = crypt_iv_tcw_dtr,
	.init      = crypt_iv_tcw_init,
	.wipe      = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post      = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}
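
/*
 * The three helpers above navigate the request layout described in
 * struct crypt_config: dmreq_start is the offset from the
 * ablkcipher_request to the dm_crypt_request, and the IV sits behind the
 * dm_crypt_request, aligned to the cipher's alignmask.
 */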

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}
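
/*
 * Note the key selection above: cc_sector & (tfms_count - 1) picks the
 * tfm, i.e. sector number modulo the key count (the mask assumes
 * tfms_count is a power of two, which is enforced when the target is
 * constructed). This implements the multikey schemes, e.g. lmk's 64-key
 * mode, where consecutive sectors are encrypted with different keys.
 */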

static void crypt_free_req(struct crypt_config *cc,
			   struct ablkcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct ablkcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

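	/*
	 * cc_pending starts at 1: that extra reference is dropped by the
	 * caller via atomic_dec_and_test(), which then reports whether all
	 * blocks completed synchronously or asynchronous completions
	 * (kcryptd_async_done) are still in flight.
	 */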
	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* There was an error while processing the request. */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, they may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fall back
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;
	struct bio_vec *bvec;

retry:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto return_clone;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_DIRECT_RECLAIM;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		remaining_size -= len;
	}

return_clone:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		crypt_free_req(cc, io->ctx.req, base_bio);

	base_bio->bi_error = error;
	bio_endio(base_bio);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);
	int error;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	error = clone->bi_error;
	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

	/*
	 * We need the original biovec array in order to decrypt
	 * the whole bio data *afterwards* -- thanks to immutable
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast().
	 */
	clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = -ENOMEM;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}

#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)

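/*
 * Per-device writer thread: it repeatedly takes over everything queued in
 * cc->write_tree, which is keyed by sector, and submits the bios in
 * ascending sector order under a single blk plug, so encrypted writes
 * reach the device sorted even when encryption completes out of order.
 */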
static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(&cc->write_thread_wait.lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
			spin_unlock_irq(&cc->write_thread_wait.lock);
			break;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		__add_wait_queue(&cc->write_thread_wait, &wait);

		spin_unlock_irq(&cc->write_thread_wait.lock);

		schedule();

		spin_lock_irq(&cc->write_thread_wait.lock);
		__remove_wait_queue(&cc->write_thread_wait, &wait);
		goto continue_locked;

pop_from_list:
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_wait.lock);

		BUG_ON(rb_parent(write_tree.rb_node));

		/*
		 * Note: we cannot walk the tree here with rb_next because
		 * the structures may be freed when kcryptd_io_write is called.
		 */
		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
		generic_make_request(clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);

	wake_up_locked(&cc->write_thread_wait);
	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
}
1287
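
/*
 * Encrypt a write bio: allocate a clone for the ciphertext, run the
 * conversion, and submit the clone once the last crypto request has
 * completed (synchronously here, or later from kcryptd_async_done()).
 */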
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = -EIO;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = -EIO;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}
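
/*
 * Completion callback for asynchronous ablkcipher requests.  Runs the
 * IV post-processing hook, frees the request, and finishes the io once
 * the last outstanding request of this context has completed.
 */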
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}
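
/*
 * Program the key into every allocated tfm.  For multi-key modes the key
 * string is a concatenation of one equally sized sub-key per tfm,
 * possibly followed by extra material (cc->key_extra_size) that is used
 * only for IV generation.
 */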
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}
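
/*
 * Destructor, also used for cleanup on constructor failure: every member
 * is checked before being torn down, so a partially constructed target
 * is handled safely.  The write thread is told to exit under the
 * wait-queue lock so the wakeup cannot be missed.
 */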
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread) {
		spin_lock_irq(&cc->write_thread_wait.lock);
		set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
		wake_up_locked(&cc->write_thread_wait);
		spin_unlock_irq(&cc->write_thread_wait.lock);
		kthread_stop(cc->write_thread);
	}

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}
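
/*
 * Parse the legacy cipher specification, cipher[:keycount]-mode-iv:ivopts
 * (e.g. "aes-cbc-essiv:sha256"), allocate the tfms, pick the IV generator
 * and program the key.
 */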
static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;
	cc->key_extra_size = 0;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start> [<#opt_params> <opt_params>]
 *
 * Optional feature parameters: allow_discards, same_cpu_crypt,
 * submit_from_crypt_cpus.
 */
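/*
 * Illustrative table line (the length, device and key below are
 * placeholders, not values from a real setup):
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0 /dev/sdb 0
 */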
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 3, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& crypto_ablkcipher_alignmask(any_tfm(cc));
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
	}
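
	/*
	 * Each mempool element (and the per-bio data below) is laid out as:
	 * ablkcipher_request plus cipher-specific context (together
	 * cc->dmreq_start bytes), struct dm_crypt_request, alignment
	 * padding, then the IV itself.
	 */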
	ret = -ENOMEM;
	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_bio_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		ret = -EINVAL;
		while (opt_params--) {
			opt_string = dm_shift_arg(&as);
			if (!opt_string) {
				ti->error = "Not enough feature arguments";
				goto bad;
			}

			if (!strcasecmp(opt_string, "allow_discards"))
				ti->num_discard_bios = 1;

			else if (!strcasecmp(opt_string, "same_cpu_crypt"))
				set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

			else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
				set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);

			else {
				ti->error = "Invalid feature arguments";
				goto bad;
			}
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	init_waitqueue_head(&cc->write_thread_wait);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
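
/*
 * Map entry point: reads are issued inline when a GFP_NOWAIT allocation
 * succeeds and are otherwise deferred to the io workqueue; writes always
 * go through the encryption workqueue.
 */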
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
	io->ctx.req = (struct ablkcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
		}

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
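/*
 * Illustrative key change ("cryptdev" is a placeholder name; the target
 * must be suspended while the key is manipulated):
 *
 *   dmsetup suspend cryptdev
 *   dmsetup message cryptdev 0 key set <new-hex-key>
 *   dmsetup resume cryptdev
 */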
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 14, 1},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");