blob: 6199245ea6a6d7c0f3b9eb3fa3ae04d7cb03b0fd [file] [log] [blame]
/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */
9
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
/* Prefix used by the DM core logging macros (DMERR/DMWARN/...) for this target. */
#define DM_MSG_PREFIX "crypt"

34/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070035 * context holding the current state of a multi-part conversion
36 */
37struct convert_context {
Milan Broz43d69032008-02-08 02:11:09 +000038 struct completion restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -070039 struct bio *bio_in;
40 struct bio *bio_out;
Kent Overstreet003b5c52013-10-11 15:45:43 -070041 struct bvec_iter iter_in;
42 struct bvec_iter iter_out;
Mikulas Patockac66029f2012-07-27 15:08:05 +010043 sector_t cc_sector;
Mikulas Patocka40b62292012-07-27 15:08:04 +010044 atomic_t cc_pending;
Mikulas Patocka610f2de2014-02-20 18:01:01 -050045 struct ablkcipher_request *req;
Linus Torvalds1da177e2005-04-16 15:20:36 -070046};
47
Milan Broz53017032008-02-08 02:10:38 +000048/*
49 * per bio private data
50 */
51struct dm_crypt_io {
Alasdair G Kergon49a8a922012-07-27 15:08:05 +010052 struct crypt_config *cc;
Milan Broz53017032008-02-08 02:10:38 +000053 struct bio *base_bio;
54 struct work_struct work;
55
56 struct convert_context ctx;
57
Mikulas Patocka40b62292012-07-27 15:08:04 +010058 atomic_t io_pending;
Milan Broz53017032008-02-08 02:10:38 +000059 int error;
Milan Broz0c395b02008-02-08 02:10:54 +000060 sector_t sector;
Mikulas Patocka298a9fa2014-03-28 15:51:55 -040061} CRYPTO_MINALIGN_ATTR;
Milan Broz53017032008-02-08 02:10:38 +000062
Milan Broz01482b72008-02-08 02:11:04 +000063struct dm_crypt_request {
Huang Yingb2174ee2009-03-16 17:44:33 +000064 struct convert_context *ctx;
Milan Broz01482b72008-02-08 02:11:04 +000065 struct scatterlist sg_in;
66 struct scatterlist sg_out;
Milan Broz2dc53272011-01-13 19:59:54 +000067 sector_t iv_sector;
Milan Broz01482b72008-02-08 02:11:04 +000068};
69
Linus Torvalds1da177e2005-04-16 15:20:36 -070070struct crypt_config;
71
72struct crypt_iv_operations {
73 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
Milan Brozd469f842007-10-19 22:42:37 +010074 const char *opts);
Linus Torvalds1da177e2005-04-16 15:20:36 -070075 void (*dtr)(struct crypt_config *cc);
Milan Brozb95bf2d2009-12-10 23:51:56 +000076 int (*init)(struct crypt_config *cc);
Milan Broz542da312009-12-10 23:51:57 +000077 int (*wipe)(struct crypt_config *cc);
Milan Broz2dc53272011-01-13 19:59:54 +000078 int (*generator)(struct crypt_config *cc, u8 *iv,
79 struct dm_crypt_request *dmreq);
80 int (*post)(struct crypt_config *cc, u8 *iv,
81 struct dm_crypt_request *dmreq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070082};
83
Milan Broz60473592009-12-10 23:51:55 +000084struct iv_essiv_private {
Milan Brozb95bf2d2009-12-10 23:51:56 +000085 struct crypto_hash *hash_tfm;
86 u8 *salt;
Milan Broz60473592009-12-10 23:51:55 +000087};
88
89struct iv_benbi_private {
90 int shift;
91};
92
Milan Broz34745782011-01-13 19:59:55 +000093#define LMK_SEED_SIZE 64 /* hash + 0 */
94struct iv_lmk_private {
95 struct crypto_shash *hash_tfm;
96 u8 *seed;
97};
98
Milan Brozed04d982013-10-28 23:21:04 +010099#define TCW_WHITENING_SIZE 16
100struct iv_tcw_private {
101 struct crypto_shash *crc32_tfm;
102 u8 *iv_seed;
103 u8 *whitening;
104};
105
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106/*
107 * Crypt: maps a linear range of a block device
108 * and encrypts / decrypts at the same time.
109 */
Mikulas Patockaf3396c582015-02-13 08:23:09 -0500110enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU };
Andi Kleenc0297722011-01-13 19:59:53 +0000111
112/*
Mikulas Patocka610f2de2014-02-20 18:01:01 -0500113 * The fields in here must be read only after initialization.
Andi Kleenc0297722011-01-13 19:59:53 +0000114 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115struct crypt_config {
116 struct dm_dev *dev;
117 sector_t start;
118
119 /*
Milan Brozddd42ed2008-02-08 02:11:07 +0000120 * pool for per bio private data, crypto requests and
121 * encryption requeusts/buffer pages
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122 */
123 mempool_t *io_pool;
Milan Brozddd42ed2008-02-08 02:11:07 +0000124 mempool_t *req_pool;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125 mempool_t *page_pool;
Milan Broz6a24c712006-10-03 01:15:40 -0700126 struct bio_set *bs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127
Milan Brozcabf08e2007-10-19 22:38:58 +0100128 struct workqueue_struct *io_queue;
129 struct workqueue_struct *crypt_queue;
Milan Broz3f1e9072008-03-28 14:16:07 -0700130
Milan Broz5ebaee62010-08-12 04:14:07 +0100131 char *cipher;
Milan Broz7dbcd132011-01-13 19:59:52 +0000132 char *cipher_string;
Milan Broz5ebaee62010-08-12 04:14:07 +0100133
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134 struct crypt_iv_operations *iv_gen_ops;
Herbert Xu79066ad2006-12-05 13:41:52 -0800135 union {
Milan Broz60473592009-12-10 23:51:55 +0000136 struct iv_essiv_private essiv;
137 struct iv_benbi_private benbi;
Milan Broz34745782011-01-13 19:59:55 +0000138 struct iv_lmk_private lmk;
Milan Brozed04d982013-10-28 23:21:04 +0100139 struct iv_tcw_private tcw;
Herbert Xu79066ad2006-12-05 13:41:52 -0800140 } iv_gen_private;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141 sector_t iv_offset;
142 unsigned int iv_size;
143
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100144 /* ESSIV: struct crypto_cipher *essiv_tfm */
145 void *iv_private;
146 struct crypto_ablkcipher **tfms;
Milan Brozd1f96422011-01-13 19:59:54 +0000147 unsigned tfms_count;
Andi Kleenc0297722011-01-13 19:59:53 +0000148
149 /*
Milan Brozddd42ed2008-02-08 02:11:07 +0000150 * Layout of each crypto request:
151 *
152 * struct ablkcipher_request
153 * context
154 * padding
155 * struct dm_crypt_request
156 * padding
157 * IV
158 *
159 * The padding is added so that dm_crypt_request and the IV are
160 * correctly aligned.
161 */
162 unsigned int dmreq_start;
Milan Brozddd42ed2008-02-08 02:11:07 +0000163
Mikulas Patocka298a9fa2014-03-28 15:51:55 -0400164 unsigned int per_bio_data_size;
165
Milan Broze48d4bb2006-10-03 01:15:37 -0700166 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167 unsigned int key_size;
Milan Brozda31a072013-10-28 23:21:03 +0100168 unsigned int key_parts; /* independent parts in key buffer */
169 unsigned int key_extra_size; /* additional keys length */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170 u8 key[0];
171};
172
Milan Broz6a24c712006-10-03 01:15:40 -0700173#define MIN_IOS 16
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174
Christoph Lametere18b8902006-12-06 20:33:20 -0800175static struct kmem_cache *_crypt_io_pool;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100177static void clone_init(struct dm_crypt_io *, struct bio *);
Alasdair G Kergon395b1672008-02-08 02:10:52 +0000178static void kcryptd_queue_crypt(struct dm_crypt_io *io);
Milan Broz2dc53272011-01-13 19:59:54 +0000179static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
Olaf Kirch027581f2007-05-09 02:32:52 -0700180
Andi Kleenc0297722011-01-13 19:59:53 +0000181/*
182 * Use this to access cipher attributes that are the same for each CPU.
183 */
184static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
185{
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100186 return cc->tfms[0];
Andi Kleenc0297722011-01-13 19:59:53 +0000187}
188
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: http://www.truecrypt.org
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
235
Milan Broz2dc53272011-01-13 19:59:54 +0000236static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
237 struct dm_crypt_request *dmreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238{
239 memset(iv, 0, cc->iv_size);
Alasdair G Kergon283a8322011-08-02 12:32:01 +0100240 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241
242 return 0;
243}
244
Milan Broz61afef62009-12-10 23:52:25 +0000245static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
Milan Broz2dc53272011-01-13 19:59:54 +0000246 struct dm_crypt_request *dmreq)
Milan Broz61afef62009-12-10 23:52:25 +0000247{
248 memset(iv, 0, cc->iv_size);
Alasdair G Kergon283a8322011-08-02 12:32:01 +0100249 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
Milan Broz61afef62009-12-10 23:52:25 +0000250
251 return 0;
252}
253
Milan Brozb95bf2d2009-12-10 23:51:56 +0000254/* Initialise ESSIV - compute salt but no local memory allocations */
255static int crypt_iv_essiv_init(struct crypt_config *cc)
256{
257 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
258 struct hash_desc desc;
259 struct scatterlist sg;
Andi Kleenc0297722011-01-13 19:59:53 +0000260 struct crypto_cipher *essiv_tfm;
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100261 int err;
Milan Brozb95bf2d2009-12-10 23:51:56 +0000262
263 sg_init_one(&sg, cc->key, cc->key_size);
264 desc.tfm = essiv->hash_tfm;
265 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
266
267 err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
268 if (err)
269 return err;
270
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100271 essiv_tfm = cc->iv_private;
Andi Kleenc0297722011-01-13 19:59:53 +0000272
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100273 err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
274 crypto_hash_digestsize(essiv->hash_tfm));
275 if (err)
276 return err;
Andi Kleenc0297722011-01-13 19:59:53 +0000277
278 return 0;
Milan Brozb95bf2d2009-12-10 23:51:56 +0000279}
280
Milan Broz542da312009-12-10 23:51:57 +0000281/* Wipe salt and reset key derived from volume key */
282static int crypt_iv_essiv_wipe(struct crypt_config *cc)
283{
284 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
285 unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
Andi Kleenc0297722011-01-13 19:59:53 +0000286 struct crypto_cipher *essiv_tfm;
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100287 int r, err = 0;
Milan Broz542da312009-12-10 23:51:57 +0000288
289 memset(essiv->salt, 0, salt_size);
290
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100291 essiv_tfm = cc->iv_private;
292 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
293 if (r)
294 err = r;
Andi Kleenc0297722011-01-13 19:59:53 +0000295
296 return err;
297}
298
299/* Set up per cpu cipher state */
300static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
301 struct dm_target *ti,
302 u8 *salt, unsigned saltsize)
303{
304 struct crypto_cipher *essiv_tfm;
305 int err;
306
307 /* Setup the essiv_tfm with the given salt */
308 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
309 if (IS_ERR(essiv_tfm)) {
310 ti->error = "Error allocating crypto tfm for ESSIV";
311 return essiv_tfm;
312 }
313
314 if (crypto_cipher_blocksize(essiv_tfm) !=
315 crypto_ablkcipher_ivsize(any_tfm(cc))) {
316 ti->error = "Block size of ESSIV cipher does "
317 "not match IV size of block cipher";
318 crypto_free_cipher(essiv_tfm);
319 return ERR_PTR(-EINVAL);
320 }
321
322 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
323 if (err) {
324 ti->error = "Failed to set key for ESSIV cipher";
325 crypto_free_cipher(essiv_tfm);
326 return ERR_PTR(err);
327 }
328
329 return essiv_tfm;
Milan Broz542da312009-12-10 23:51:57 +0000330}
331
Milan Broz60473592009-12-10 23:51:55 +0000332static void crypt_iv_essiv_dtr(struct crypt_config *cc)
333{
Andi Kleenc0297722011-01-13 19:59:53 +0000334 struct crypto_cipher *essiv_tfm;
Milan Broz60473592009-12-10 23:51:55 +0000335 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
336
Milan Brozb95bf2d2009-12-10 23:51:56 +0000337 crypto_free_hash(essiv->hash_tfm);
338 essiv->hash_tfm = NULL;
339
340 kzfree(essiv->salt);
341 essiv->salt = NULL;
Andi Kleenc0297722011-01-13 19:59:53 +0000342
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100343 essiv_tfm = cc->iv_private;
Andi Kleenc0297722011-01-13 19:59:53 +0000344
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100345 if (essiv_tfm)
346 crypto_free_cipher(essiv_tfm);
Andi Kleenc0297722011-01-13 19:59:53 +0000347
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100348 cc->iv_private = NULL;
Milan Broz60473592009-12-10 23:51:55 +0000349}
350
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
Milan Brozd469f842007-10-19 22:42:37 +0100352 const char *opts)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353{
Milan Broz5861f1b2009-12-10 23:51:56 +0000354 struct crypto_cipher *essiv_tfm = NULL;
355 struct crypto_hash *hash_tfm = NULL;
Milan Broz5861f1b2009-12-10 23:51:56 +0000356 u8 *salt = NULL;
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100357 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700358
Milan Broz5861f1b2009-12-10 23:51:56 +0000359 if (!opts) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700360 ti->error = "Digest algorithm missing for ESSIV mode";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361 return -EINVAL;
362 }
363
Milan Brozb95bf2d2009-12-10 23:51:56 +0000364 /* Allocate hash algorithm */
Herbert Xu35058682006-08-24 19:10:20 +1000365 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
366 if (IS_ERR(hash_tfm)) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700367 ti->error = "Error initializing ESSIV hash";
Milan Broz5861f1b2009-12-10 23:51:56 +0000368 err = PTR_ERR(hash_tfm);
369 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370 }
371
Milan Brozb95bf2d2009-12-10 23:51:56 +0000372 salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
Milan Broz5861f1b2009-12-10 23:51:56 +0000373 if (!salt) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700374 ti->error = "Error kmallocing salt storage in ESSIV";
Milan Broz5861f1b2009-12-10 23:51:56 +0000375 err = -ENOMEM;
376 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377 }
378
Milan Brozb95bf2d2009-12-10 23:51:56 +0000379 cc->iv_gen_private.essiv.salt = salt;
Milan Brozb95bf2d2009-12-10 23:51:56 +0000380 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
381
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100382 essiv_tfm = setup_essiv_cpu(cc, ti, salt,
383 crypto_hash_digestsize(hash_tfm));
384 if (IS_ERR(essiv_tfm)) {
385 crypt_iv_essiv_dtr(cc);
386 return PTR_ERR(essiv_tfm);
Andi Kleenc0297722011-01-13 19:59:53 +0000387 }
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100388 cc->iv_private = essiv_tfm;
Andi Kleenc0297722011-01-13 19:59:53 +0000389
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390 return 0;
Milan Broz5861f1b2009-12-10 23:51:56 +0000391
392bad:
Milan Broz5861f1b2009-12-10 23:51:56 +0000393 if (hash_tfm && !IS_ERR(hash_tfm))
394 crypto_free_hash(hash_tfm);
Milan Brozb95bf2d2009-12-10 23:51:56 +0000395 kfree(salt);
Milan Broz5861f1b2009-12-10 23:51:56 +0000396 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397}
398
Milan Broz2dc53272011-01-13 19:59:54 +0000399static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
400 struct dm_crypt_request *dmreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401{
Mikulas Patockafd2d2312012-07-27 15:08:05 +0100402 struct crypto_cipher *essiv_tfm = cc->iv_private;
Andi Kleenc0297722011-01-13 19:59:53 +0000403
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 memset(iv, 0, cc->iv_size);
Alasdair G Kergon283a8322011-08-02 12:32:01 +0100405 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
Andi Kleenc0297722011-01-13 19:59:53 +0000406 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
407
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 return 0;
409}
410
Rik Snel48527fa2006-09-03 08:56:39 +1000411static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
412 const char *opts)
413{
Andi Kleenc0297722011-01-13 19:59:53 +0000414 unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
David Howellsf0d1b0b2006-12-08 02:37:49 -0800415 int log = ilog2(bs);
Rik Snel48527fa2006-09-03 08:56:39 +1000416
417 /* we need to calculate how far we must shift the sector count
418 * to get the cipher block count, we use this shift in _gen */
419
420 if (1 << log != bs) {
421 ti->error = "cypher blocksize is not a power of 2";
422 return -EINVAL;
423 }
424
425 if (log > 9) {
426 ti->error = "cypher blocksize is > 512";
427 return -EINVAL;
428 }
429
Milan Broz60473592009-12-10 23:51:55 +0000430 cc->iv_gen_private.benbi.shift = 9 - log;
Rik Snel48527fa2006-09-03 08:56:39 +1000431
432 return 0;
433}
434
/* benbi holds no dynamically allocated state; nothing to release. */
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
438
Milan Broz2dc53272011-01-13 19:59:54 +0000439static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
440 struct dm_crypt_request *dmreq)
Rik Snel48527fa2006-09-03 08:56:39 +1000441{
Herbert Xu79066ad2006-12-05 13:41:52 -0800442 __be64 val;
443
Rik Snel48527fa2006-09-03 08:56:39 +1000444 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
Herbert Xu79066ad2006-12-05 13:41:52 -0800445
Milan Broz2dc53272011-01-13 19:59:54 +0000446 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
Herbert Xu79066ad2006-12-05 13:41:52 -0800447 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
Rik Snel48527fa2006-09-03 08:56:39 +1000448
Linus Torvalds1da177e2005-04-16 15:20:36 -0700449 return 0;
450}
451
Milan Broz2dc53272011-01-13 19:59:54 +0000452static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
453 struct dm_crypt_request *dmreq)
Ludwig Nussel46b47732007-05-09 02:32:55 -0700454{
455 memset(iv, 0, cc->iv_size);
456
457 return 0;
458}
459
Milan Broz34745782011-01-13 19:59:55 +0000460static void crypt_iv_lmk_dtr(struct crypt_config *cc)
461{
462 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
463
464 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
465 crypto_free_shash(lmk->hash_tfm);
466 lmk->hash_tfm = NULL;
467
468 kzfree(lmk->seed);
469 lmk->seed = NULL;
470}
471
472static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
473 const char *opts)
474{
475 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
476
477 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
478 if (IS_ERR(lmk->hash_tfm)) {
479 ti->error = "Error initializing LMK hash";
480 return PTR_ERR(lmk->hash_tfm);
481 }
482
483 /* No seed in LMK version 2 */
484 if (cc->key_parts == cc->tfms_count) {
485 lmk->seed = NULL;
486 return 0;
487 }
488
489 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
490 if (!lmk->seed) {
491 crypt_iv_lmk_dtr(cc);
492 ti->error = "Error kmallocing seed storage in LMK";
493 return -ENOMEM;
494 }
495
496 return 0;
497}
498
499static int crypt_iv_lmk_init(struct crypt_config *cc)
500{
501 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
502 int subkey_size = cc->key_size / cc->key_parts;
503
504 /* LMK seed is on the position of LMK_KEYS + 1 key */
505 if (lmk->seed)
506 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
507 crypto_shash_digestsize(lmk->hash_tfm));
508
509 return 0;
510}
511
512static int crypt_iv_lmk_wipe(struct crypt_config *cc)
513{
514 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
515
516 if (lmk->seed)
517 memset(lmk->seed, 0, LMK_SEED_SIZE);
518
519 return 0;
520}
521
522static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
523 struct dm_crypt_request *dmreq,
524 u8 *data)
525{
526 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200527 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
Milan Broz34745782011-01-13 19:59:55 +0000528 struct md5_state md5state;
Milan Brozda31a072013-10-28 23:21:03 +0100529 __le32 buf[4];
Milan Broz34745782011-01-13 19:59:55 +0000530 int i, r;
531
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200532 desc->tfm = lmk->hash_tfm;
533 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
Milan Broz34745782011-01-13 19:59:55 +0000534
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200535 r = crypto_shash_init(desc);
Milan Broz34745782011-01-13 19:59:55 +0000536 if (r)
537 return r;
538
539 if (lmk->seed) {
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200540 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
Milan Broz34745782011-01-13 19:59:55 +0000541 if (r)
542 return r;
543 }
544
545 /* Sector is always 512B, block size 16, add data of blocks 1-31 */
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200546 r = crypto_shash_update(desc, data + 16, 16 * 31);
Milan Broz34745782011-01-13 19:59:55 +0000547 if (r)
548 return r;
549
550 /* Sector is cropped to 56 bits here */
551 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
552 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
553 buf[2] = cpu_to_le32(4024);
554 buf[3] = 0;
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200555 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
Milan Broz34745782011-01-13 19:59:55 +0000556 if (r)
557 return r;
558
559 /* No MD5 padding here */
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200560 r = crypto_shash_export(desc, &md5state);
Milan Broz34745782011-01-13 19:59:55 +0000561 if (r)
562 return r;
563
564 for (i = 0; i < MD5_HASH_WORDS; i++)
565 __cpu_to_le32s(&md5state.hash[i]);
566 memcpy(iv, &md5state.hash, cc->iv_size);
567
568 return 0;
569}
570
571static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
572 struct dm_crypt_request *dmreq)
573{
574 u8 *src;
575 int r = 0;
576
577 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
Cong Wangc2e022c2011-11-28 13:26:02 +0800578 src = kmap_atomic(sg_page(&dmreq->sg_in));
Milan Broz34745782011-01-13 19:59:55 +0000579 r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
Cong Wangc2e022c2011-11-28 13:26:02 +0800580 kunmap_atomic(src);
Milan Broz34745782011-01-13 19:59:55 +0000581 } else
582 memset(iv, 0, cc->iv_size);
583
584 return r;
585}
586
587static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
588 struct dm_crypt_request *dmreq)
589{
590 u8 *dst;
591 int r;
592
593 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
594 return 0;
595
Cong Wangc2e022c2011-11-28 13:26:02 +0800596 dst = kmap_atomic(sg_page(&dmreq->sg_out));
Milan Broz34745782011-01-13 19:59:55 +0000597 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
598
599 /* Tweak the first block of plaintext sector */
600 if (!r)
601 crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
602
Cong Wangc2e022c2011-11-28 13:26:02 +0800603 kunmap_atomic(dst);
Milan Broz34745782011-01-13 19:59:55 +0000604 return r;
605}
606
Milan Brozed04d982013-10-28 23:21:04 +0100607static void crypt_iv_tcw_dtr(struct crypt_config *cc)
608{
609 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
610
611 kzfree(tcw->iv_seed);
612 tcw->iv_seed = NULL;
613 kzfree(tcw->whitening);
614 tcw->whitening = NULL;
615
616 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
617 crypto_free_shash(tcw->crc32_tfm);
618 tcw->crc32_tfm = NULL;
619}
620
621static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
622 const char *opts)
623{
624 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
625
626 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
627 ti->error = "Wrong key size for TCW";
628 return -EINVAL;
629 }
630
631 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
632 if (IS_ERR(tcw->crc32_tfm)) {
633 ti->error = "Error initializing CRC32 in TCW";
634 return PTR_ERR(tcw->crc32_tfm);
635 }
636
637 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
638 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
639 if (!tcw->iv_seed || !tcw->whitening) {
640 crypt_iv_tcw_dtr(cc);
641 ti->error = "Error allocating seed storage in TCW";
642 return -ENOMEM;
643 }
644
645 return 0;
646}
647
648static int crypt_iv_tcw_init(struct crypt_config *cc)
649{
650 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
651 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
652
653 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
654 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
655 TCW_WHITENING_SIZE);
656
657 return 0;
658}
659
660static int crypt_iv_tcw_wipe(struct crypt_config *cc)
661{
662 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
663
664 memset(tcw->iv_seed, 0, cc->iv_size);
665 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
666
667 return 0;
668}
669
670static int crypt_iv_tcw_whitening(struct crypt_config *cc,
671 struct dm_crypt_request *dmreq,
672 u8 *data)
673{
674 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
675 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
676 u8 buf[TCW_WHITENING_SIZE];
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200677 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
Milan Brozed04d982013-10-28 23:21:04 +0100678 int i, r;
679
680 /* xor whitening with sector number */
681 memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
682 crypto_xor(buf, (u8 *)&sector, 8);
683 crypto_xor(&buf[8], (u8 *)&sector, 8);
684
685 /* calculate crc32 for every 32bit part and xor it */
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200686 desc->tfm = tcw->crc32_tfm;
687 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
Milan Brozed04d982013-10-28 23:21:04 +0100688 for (i = 0; i < 4; i++) {
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200689 r = crypto_shash_init(desc);
Milan Brozed04d982013-10-28 23:21:04 +0100690 if (r)
691 goto out;
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200692 r = crypto_shash_update(desc, &buf[i * 4], 4);
Milan Brozed04d982013-10-28 23:21:04 +0100693 if (r)
694 goto out;
Jan-Simon Möllerb6106262012-07-02 13:50:54 +0200695 r = crypto_shash_final(desc, &buf[i * 4]);
Milan Brozed04d982013-10-28 23:21:04 +0100696 if (r)
697 goto out;
698 }
699 crypto_xor(&buf[0], &buf[12], 4);
700 crypto_xor(&buf[4], &buf[8], 4);
701
702 /* apply whitening (8 bytes) to whole sector */
703 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
704 crypto_xor(data + i * 8, buf, 8);
705out:
Milan Broz1a71d6f2014-11-22 09:36:04 +0100706 memzero_explicit(buf, sizeof(buf));
Milan Brozed04d982013-10-28 23:21:04 +0100707 return r;
708}
709
710static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
711 struct dm_crypt_request *dmreq)
712{
713 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
714 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
715 u8 *src;
716 int r = 0;
717
718 /* Remove whitening from ciphertext */
719 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
720 src = kmap_atomic(sg_page(&dmreq->sg_in));
721 r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
722 kunmap_atomic(src);
723 }
724
725 /* Calculate IV */
726 memcpy(iv, tcw->iv_seed, cc->iv_size);
727 crypto_xor(iv, (u8 *)&sector, 8);
728 if (cc->iv_size > 8)
729 crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
730
731 return r;
732}
733
734static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
735 struct dm_crypt_request *dmreq)
736{
737 u8 *dst;
738 int r;
739
740 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
741 return 0;
742
743 /* Apply whitening on ciphertext */
744 dst = kmap_atomic(sg_page(&dmreq->sg_out));
745 r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
746 kunmap_atomic(dst);
747
748 return r;
749}
750
/*
 * Dispatch tables for the supported IV generation modes.  A table is
 * selected in the constructor and stored in cc->iv_gen_ops; optional
 * hooks (ctr/dtr/init/wipe/post) are NULL when a mode does not need them.
 */

/* "plain": IV is the 32-bit sector number, little-endian. */
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

/* "plain64": like plain but uses the full 64-bit sector number. */
static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

/* "essiv": encrypted sector-salt IV; needs setup/teardown of the ESSIV tfm. */
static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

/* "benbi": big-endian narrow-block-count IV (for LRW-class modes). */
static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

/* "null": all-zero IV (compatibility mode). */
static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

/* "lmk": loop-AES compatible IV; .post is needed on the read path. */
static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

/* "tcw": TrueCrypt-compatible IV + whitening; .post whitens writes. */
static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};
794
Milan Brozd469f842007-10-19 22:42:37 +0100795static void crypt_convert_init(struct crypt_config *cc,
796 struct convert_context *ctx,
797 struct bio *bio_out, struct bio *bio_in,
Milan Brozfcd369d2008-02-08 02:10:41 +0000798 sector_t sector)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700799{
800 ctx->bio_in = bio_in;
801 ctx->bio_out = bio_out;
Kent Overstreet003b5c52013-10-11 15:45:43 -0700802 if (bio_in)
803 ctx->iter_in = bio_in->bi_iter;
804 if (bio_out)
805 ctx->iter_out = bio_out->bi_iter;
Mikulas Patockac66029f2012-07-27 15:08:05 +0100806 ctx->cc_sector = sector + cc->iv_offset;
Milan Broz43d69032008-02-08 02:11:09 +0000807 init_completion(&ctx->restart);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808}
809
Huang Yingb2174ee2009-03-16 17:44:33 +0000810static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
811 struct ablkcipher_request *req)
812{
813 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
814}
815
816static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
817 struct dm_crypt_request *dmreq)
818{
819 return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
820}
821
Milan Broz2dc53272011-01-13 19:59:54 +0000822static u8 *iv_of_dmreq(struct crypt_config *cc,
823 struct dm_crypt_request *dmreq)
824{
825 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
826 crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
827}
828
/*
 * Encrypt or decrypt exactly one 512-byte sector.
 *
 * Builds single-entry scatterlists for the current input/output positions,
 * advances both bio iterators by one sector, generates the IV (if the
 * configured mode has a generator), and submits the ablkcipher request.
 *
 * Returns 0 on synchronous completion, -EINPROGRESS/-EBUSY when the crypto
 * driver completes asynchronously, or a negative errno on failure.
 */
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	/* Record the sector now: the async callback needs it for .post. */
	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	/* Consume one sector from each bio before submitting. */
	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	/* Synchronous completion: run the IV post-hook (e.g. tcw whitening). */
	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}
874
Milan Broz95497a92008-02-08 02:11:12 +0000875static void kcryptd_async_done(struct crypto_async_request *async_req,
876 int error);
Andi Kleenc0297722011-01-13 19:59:53 +0000877
/*
 * Ensure ctx->req holds an ablkcipher request ready for the next sector.
 *
 * The request is taken from the mempool only when the previous one was
 * handed off (ctx->req cleared after an async submit); otherwise it is
 * reused.  The tfm is selected per-sector so multi-key ("keycount") setups
 * rotate through their tfms; tfms_count is a power of two, so the mask
 * below is a cheap modulo.
 */
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}
891
/*
 * Release an ablkcipher request.
 *
 * The first request per bio is embedded in the per-bio data right after
 * struct dm_crypt_io; only requests that came from the mempool (i.e. not
 * that embedded one) are returned to it.
 */
static void crypt_free_req(struct crypt_config *cc,
			   struct ablkcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct ablkcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}
900
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 *
 * Walks both iterators sector by sector.  ctx->cc_pending starts at 1 (a
 * self-reference held by this function) and is incremented once per
 * submitted block; async completions drop it in kcryptd_async_done().
 *
 * Returns 0 when every block was submitted (possibly still in flight
 * asynchronously), or the first hard error from crypt_convert_block().
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
		case -EBUSY:
			/*
			 * Backlogged: the callback signals ->restart once
			 * the request is accepted; wait, then treat it
			 * like -EINPROGRESS.
			 */
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through*/
		case -EINPROGRESS:
			/* Request now owned by the crypto layer. */
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}
946
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -0500947static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
948
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 *
 * Pages come from cc->page_pool; with GFP_NOIO the mempool allocation
 * waits rather than fails, so only the bio allocation itself can return
 * NULL here.  The caller owns the bio and must release the pages through
 * crypt_free_buffer_pages().
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;
	struct bio_vec *bvec;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);

		/* Last page may be partial. */
		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		/* Fill the bvec directly instead of using bio_add_page(). */
		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		size -= len;
	}

	return clone;
}
986
/*
 * Return every page of a bio allocated by crypt_alloc_buffer() to the
 * page mempool.  The bv_page pointers are cleared so a double free would
 * trip the BUG_ON instead of corrupting the pool.
 */
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
998
Mikulas Patocka298a9fa2014-03-28 15:51:55 -0400999static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1000 struct bio *bio, sector_t sector)
Milan Brozdc440d1e2008-10-10 13:37:03 +01001001{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001002 io->cc = cc;
Milan Brozdc440d1e2008-10-10 13:37:03 +01001003 io->base_bio = bio;
1004 io->sector = sector;
1005 io->error = 0;
Mikulas Patocka610f2de2014-02-20 18:01:01 -05001006 io->ctx.req = NULL;
Mikulas Patocka40b62292012-07-27 15:08:04 +01001007 atomic_set(&io->io_pending, 0);
Milan Brozdc440d1e2008-10-10 13:37:03 +01001008}
1009
/* Take a reference on io; paired with crypt_dec_pending(). */
static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}
1014
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015/*
1016 * One of the bios was finished. Check for completion of
1017 * the whole request and correctly clean up the buffer.
1018 */
Milan Broz5742fd72008-02-08 02:10:43 +00001019static void crypt_dec_pending(struct dm_crypt_io *io)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001021 struct crypt_config *cc = io->cc;
Milan Brozb35f8ca2009-03-16 17:44:36 +00001022 struct bio *base_bio = io->base_bio;
Milan Brozb35f8ca2009-03-16 17:44:36 +00001023 int error = io->error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001024
Mikulas Patocka40b62292012-07-27 15:08:04 +01001025 if (!atomic_dec_and_test(&io->io_pending))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026 return;
1027
Mikulas Patocka610f2de2014-02-20 18:01:01 -05001028 if (io->ctx.req)
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04001029 crypt_free_req(cc, io->ctx.req, base_bio);
1030 if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
1031 mempool_free(io, cc->io_pool);
Milan Brozb35f8ca2009-03-16 17:44:36 +00001032
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -05001033 bio_endio(base_bio, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034}
1035
1036/*
Milan Brozcabf08e2007-10-19 22:38:58 +01001037 * kcryptd/kcryptd_io:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 *
1039 * Needed because it would be very unwise to do decryption in an
Milan Broz23541d22006-10-03 01:15:39 -07001040 * interrupt context.
Milan Brozcabf08e2007-10-19 22:38:58 +01001041 *
1042 * kcryptd performs the actual encryption or decryption.
1043 *
1044 * kcryptd_io performs the IO submission.
1045 *
1046 * They must be separated as otherwise the final stages could be
1047 * starved by new requests which can block in the first stages due
1048 * to memory allocation.
Andi Kleenc0297722011-01-13 19:59:53 +00001049 *
1050 * The work is done per CPU global for all dm-crypt instances.
1051 * They should not depend on each other and do not block.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052 */
/*
 * Completion handler for the cloned bios submitted to the underlying
 * device.  Write clones carry our own pages, which are returned to the
 * pool here; a successfully read clone is handed to kcryptd for
 * decryption, everything else just drops its io reference.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	/* Some drivers clear BIO_UPTODATE without setting an error code. */
	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		/* Decrypt in process context, not here in irq context. */
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}
1080
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001081static void clone_init(struct dm_crypt_io *io, struct bio *clone)
Milan Broz8b004452006-10-03 01:15:37 -07001082{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001083 struct crypt_config *cc = io->cc;
Milan Broz8b004452006-10-03 01:15:37 -07001084
1085 clone->bi_private = io;
1086 clone->bi_end_io = crypt_endio;
1087 clone->bi_bdev = cc->dev->bdev;
1088 clone->bi_rw = io->base_bio->bi_rw;
1089}
1090
/*
 * Submit the read to the underlying device via a clone of the original
 * bio; decryption happens later in crypt_endio()/kcryptd.
 *
 * Returns 0 on submission, 1 if the clone allocation failed (caller may
 * retry with a stronger gfp mask or report -ENOMEM).
 */
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	/* Remap onto the target's start offset. */
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}
1114
Milan Broz4e4eef62008-02-08 02:10:49 +00001115static void kcryptd_io_write(struct dm_crypt_io *io)
1116{
Milan Broz95497a92008-02-08 02:11:12 +00001117 struct bio *clone = io->ctx.bio_out;
Milan Broz95497a92008-02-08 02:11:12 +00001118 generic_make_request(clone);
Milan Broz4e4eef62008-02-08 02:10:49 +00001119}
1120
/*
 * io_queue work handler: perform the actual bio submission in process
 * context.  Reads take an extra reference around the submit so a failed
 * clone allocation can be reported via io->error before the drop.
 */
static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}
1133
1134static void kcryptd_queue_io(struct dm_crypt_io *io)
1135{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001136 struct crypt_config *cc = io->cc;
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001137
1138 INIT_WORK(&io->work, kcryptd_io);
1139 queue_work(cc->io_queue, &io->work);
1140}
1141
/*
 * Submit a fully encrypted write clone, or tear it down on conversion
 * error.  When called from the async completion path (async != 0) the
 * submission is bounced to the io workqueue because we may be running
 * in the crypto driver's completion context.
 */
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}
1164
/*
 * Write path: allocate an output buffer covering the whole incoming bio,
 * encrypt into it, and submit it once conversion completes synchronously.
 * If any block is still in flight, the async completion path
 * (kcryptd_async_done -> kcryptd_crypt_write_io_submit) submits instead.
 */
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	/* bio_out is filled in below once the buffer exists. */
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = -EIO;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = -EIO;
	/* True when no async block remains outstanding. */
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}
1205
/* All blocks of a read are decrypted: drop the conversion's io reference. */
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}
1210
/*
 * Read path: decrypt the just-read data in place (bio_in == bio_out ==
 * base_bio).  If every block completed synchronously the final reference
 * is dropped here; otherwise kcryptd_async_done() finishes the job.
 */
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	/* Drop crypt_convert()'s self-reference on cc_pending. */
	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}
1230
/*
 * Crypto-layer completion callback for asynchronous block conversions.
 *
 * -EINPROGRESS here means a previously backlogged (-EBUSY) request has
 * now been accepted: wake crypt_convert(), which is waiting on
 * ctx->restart, and expect a second callback with the real result.
 * Otherwise run the IV post-hook, record any error, free the request,
 * and if this was the last outstanding block, finish the read or submit
 * the encrypted write.
 */
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}
1260
Milan Broz4e4eef62008-02-08 02:10:49 +00001261static void kcryptd_crypt(struct work_struct *work)
1262{
1263 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1264
1265 if (bio_data_dir(io->base_bio) == READ)
1266 kcryptd_crypt_read_convert(io);
1267 else
1268 kcryptd_crypt_write_convert(io);
Milan Broz8b004452006-10-03 01:15:37 -07001269}
1270
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001271static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1272{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001273 struct crypt_config *cc = io->cc;
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001274
1275 INIT_WORK(&io->work, kcryptd_crypt);
1276 queue_work(cc->crypt_queue, &io->work);
1277}
1278
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279/*
1280 * Decode key from its hex representation
1281 */
1282static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
1283{
1284 char buffer[3];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 unsigned int i;
1286
1287 buffer[2] = '\0';
1288
Milan Broz8b004452006-10-03 01:15:37 -07001289 for (i = 0; i < size; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 buffer[0] = *hex++;
1291 buffer[1] = *hex++;
1292
majianpeng1a66a082012-07-27 15:07:59 +01001293 if (kstrtou8(buffer, 16, &key[i]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 return -EINVAL;
1295 }
1296
1297 if (*hex != '\0')
1298 return -EINVAL;
1299
1300 return 0;
1301}
1302
/*
 * Free every allocated cipher tfm and the tfms array itself.  Safe to
 * call on a partially constructed array: NULL and ERR_PTR slots are
 * skipped, and the function is idempotent (cc->tfms ends up NULL).
 */
static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}
1319
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001320static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
Milan Brozd1f96422011-01-13 19:59:54 +00001321{
Milan Brozd1f96422011-01-13 19:59:54 +00001322 unsigned i;
1323 int err;
1324
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001325 cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
1326 GFP_KERNEL);
1327 if (!cc->tfms)
1328 return -ENOMEM;
1329
Milan Brozd1f96422011-01-13 19:59:54 +00001330 for (i = 0; i < cc->tfms_count; i++) {
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001331 cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
1332 if (IS_ERR(cc->tfms[i])) {
1333 err = PTR_ERR(cc->tfms[i]);
1334 crypt_free_tfms(cc);
Milan Brozd1f96422011-01-13 19:59:54 +00001335 return err;
1336 }
1337 }
1338
1339 return 0;
1340}
1341
/*
 * Split cc->key into tfms_count equal subkeys and program each tfm.
 * Extra key material (cc->key_extra_size, used by IV modes such as tcw)
 * is excluded from the split.  All tfms are attempted even after a
 * failure; the last error is returned.
 */
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}
1360
/*
 * Decode the hex key string into cc->key and program the tfms.
 *
 * @key is always overwritten with '0' characters on exit (success or
 * failure) so the hex material does not linger in the argument buffer.
 * Returns 0 on success, -EINVAL on malformed input, or the setkey error.
 */
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}
1387
/*
 * Erase the key material and push the (now zero) key into every tfm,
 * marking the key invalid.  cc stays alive afterwards, so this memset
 * is not a dead store the compiler could elide.
 */
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}
1395
/*
 * Target destructor: tear down everything crypt_ctr() may have built,
 * in reverse dependency order (workqueues first so no work touches the
 * pools/tfms being freed).  Tolerates a partially constructed cc, as it
 * is also the error path of the constructor.
 */
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	/* Flush and stop all deferred work before freeing its resources. */
	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	/* kzfree zeroes before freeing - these may hold key-derived data. */
	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}
1434
Milan Broz5ebaee62010-08-12 04:14:07 +01001435static int crypt_ctr_cipher(struct dm_target *ti,
1436 char *cipher_in, char *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437{
Milan Broz5ebaee62010-08-12 04:14:07 +01001438 struct crypt_config *cc = ti->private;
Milan Brozd1f96422011-01-13 19:59:54 +00001439 char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
Milan Broz5ebaee62010-08-12 04:14:07 +01001440 char *cipher_api = NULL;
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001441 int ret = -EINVAL;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001442 char dummy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
Milan Broz5ebaee62010-08-12 04:14:07 +01001444 /* Convert to crypto api definition? */
1445 if (strchr(cipher_in, '(')) {
1446 ti->error = "Bad cipher specification";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 return -EINVAL;
1448 }
1449
Milan Broz7dbcd132011-01-13 19:59:52 +00001450 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
1451 if (!cc->cipher_string)
1452 goto bad_mem;
1453
Milan Broz5ebaee62010-08-12 04:14:07 +01001454 /*
1455 * Legacy dm-crypt cipher specification
Milan Brozd1f96422011-01-13 19:59:54 +00001456 * cipher[:keycount]-mode-iv:ivopts
Milan Broz5ebaee62010-08-12 04:14:07 +01001457 */
1458 tmp = cipher_in;
Milan Brozd1f96422011-01-13 19:59:54 +00001459 keycount = strsep(&tmp, "-");
1460 cipher = strsep(&keycount, ":");
1461
1462 if (!keycount)
1463 cc->tfms_count = 1;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001464 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
Milan Brozd1f96422011-01-13 19:59:54 +00001465 !is_power_of_2(cc->tfms_count)) {
1466 ti->error = "Bad cipher key count specification";
1467 return -EINVAL;
1468 }
1469 cc->key_parts = cc->tfms_count;
Milan Brozda31a072013-10-28 23:21:03 +01001470 cc->key_extra_size = 0;
Milan Broz5ebaee62010-08-12 04:14:07 +01001471
1472 cc->cipher = kstrdup(cipher, GFP_KERNEL);
1473 if (!cc->cipher)
1474 goto bad_mem;
1475
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 chainmode = strsep(&tmp, "-");
1477 ivopts = strsep(&tmp, "-");
1478 ivmode = strsep(&ivopts, ":");
1479
1480 if (tmp)
Milan Broz5ebaee62010-08-12 04:14:07 +01001481 DMWARN("Ignoring unexpected additional cipher options");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
Milan Broz7dbcd132011-01-13 19:59:52 +00001483 /*
1484 * For compatibility with the original dm-crypt mapping format, if
1485 * only the cipher name is supplied, use cbc-plain.
1486 */
Milan Broz5ebaee62010-08-12 04:14:07 +01001487 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 chainmode = "cbc";
1489 ivmode = "plain";
1490 }
1491
Herbert Xud1806f62006-08-22 20:29:17 +10001492 if (strcmp(chainmode, "ecb") && !ivmode) {
Milan Broz5ebaee62010-08-12 04:14:07 +01001493 ti->error = "IV mechanism required";
1494 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 }
1496
Milan Broz5ebaee62010-08-12 04:14:07 +01001497 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
1498 if (!cipher_api)
1499 goto bad_mem;
1500
1501 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
1502 "%s(%s)", chainmode, cipher);
1503 if (ret < 0) {
1504 kfree(cipher_api);
1505 goto bad_mem;
Herbert Xud1806f62006-08-22 20:29:17 +10001506 }
1507
Milan Broz5ebaee62010-08-12 04:14:07 +01001508 /* Allocate cipher */
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001509 ret = crypt_alloc_tfms(cc, cipher_api);
1510 if (ret < 0) {
1511 ti->error = "Error allocating crypto tfm";
1512 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514
Milan Broz5ebaee62010-08-12 04:14:07 +01001515 /* Initialize IV */
Andi Kleenc0297722011-01-13 19:59:53 +00001516 cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
Milan Broz5ebaee62010-08-12 04:14:07 +01001517 if (cc->iv_size)
1518 /* at least a 64 bit sector number should fit in our buffer */
1519 cc->iv_size = max(cc->iv_size,
1520 (unsigned int)(sizeof(u64) / sizeof(u8)));
1521 else if (ivmode) {
1522 DMWARN("Selected cipher does not support IVs");
1523 ivmode = NULL;
1524 }
1525
1526 /* Choose ivmode, see comments at iv code. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 if (ivmode == NULL)
1528 cc->iv_gen_ops = NULL;
1529 else if (strcmp(ivmode, "plain") == 0)
1530 cc->iv_gen_ops = &crypt_iv_plain_ops;
Milan Broz61afef62009-12-10 23:52:25 +00001531 else if (strcmp(ivmode, "plain64") == 0)
1532 cc->iv_gen_ops = &crypt_iv_plain64_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 else if (strcmp(ivmode, "essiv") == 0)
1534 cc->iv_gen_ops = &crypt_iv_essiv_ops;
Rik Snel48527fa2006-09-03 08:56:39 +10001535 else if (strcmp(ivmode, "benbi") == 0)
1536 cc->iv_gen_ops = &crypt_iv_benbi_ops;
Ludwig Nussel46b47732007-05-09 02:32:55 -07001537 else if (strcmp(ivmode, "null") == 0)
1538 cc->iv_gen_ops = &crypt_iv_null_ops;
Milan Broz34745782011-01-13 19:59:55 +00001539 else if (strcmp(ivmode, "lmk") == 0) {
1540 cc->iv_gen_ops = &crypt_iv_lmk_ops;
Milan Brozed04d982013-10-28 23:21:04 +01001541 /*
1542 * Version 2 and 3 is recognised according
Milan Broz34745782011-01-13 19:59:55 +00001543 * to length of provided multi-key string.
1544 * If present (version 3), last key is used as IV seed.
Milan Brozed04d982013-10-28 23:21:04 +01001545 * All keys (including IV seed) are always the same size.
Milan Broz34745782011-01-13 19:59:55 +00001546 */
Milan Brozda31a072013-10-28 23:21:03 +01001547 if (cc->key_size % cc->key_parts) {
Milan Broz34745782011-01-13 19:59:55 +00001548 cc->key_parts++;
Milan Brozda31a072013-10-28 23:21:03 +01001549 cc->key_extra_size = cc->key_size / cc->key_parts;
1550 }
Milan Brozed04d982013-10-28 23:21:04 +01001551 } else if (strcmp(ivmode, "tcw") == 0) {
1552 cc->iv_gen_ops = &crypt_iv_tcw_ops;
1553 cc->key_parts += 2; /* IV + whitening */
1554 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
Milan Broz34745782011-01-13 19:59:55 +00001555 } else {
Milan Broz5ebaee62010-08-12 04:14:07 +01001556 ret = -EINVAL;
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001557 ti->error = "Invalid IV mode";
Milan Broz28513fc2010-08-12 04:14:06 +01001558 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 }
1560
Milan Brozda31a072013-10-28 23:21:03 +01001561 /* Initialize and set key */
1562 ret = crypt_set_key(cc, key);
1563 if (ret < 0) {
1564 ti->error = "Error decoding and setting key";
1565 goto bad;
1566 }
1567
Milan Broz28513fc2010-08-12 04:14:06 +01001568 /* Allocate IV */
1569 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
1570 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
1571 if (ret < 0) {
1572 ti->error = "Error creating IV";
1573 goto bad;
1574 }
Milan Brozb95bf2d2009-12-10 23:51:56 +00001575 }
1576
Milan Broz28513fc2010-08-12 04:14:06 +01001577 /* Initialize IV (set keys for ESSIV etc) */
1578 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
1579 ret = cc->iv_gen_ops->init(cc);
1580 if (ret < 0) {
1581 ti->error = "Error initialising IV";
1582 goto bad;
1583 }
1584 }
1585
Milan Broz5ebaee62010-08-12 04:14:07 +01001586 ret = 0;
1587bad:
1588 kfree(cipher_api);
1589 return ret;
1590
1591bad_mem:
1592 ti->error = "Cannot allocate cipher strings";
1593 return -ENOMEM;
1594}
1595
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start> [<#opt_params> <opt_params>]
 *
 * On any failure after cc is allocated we goto bad, which calls
 * crypt_dtr(ti) to unwind whatever was set up so far.
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	/* At most two optional feature arguments are currently defined */
	static struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	/* Key is hex-encoded: two characters per key byte */
	key_size = strlen(argv[1]) >> 1;

	/* Key material is stored in the flexible tail of crypt_config */
	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	/* Set ti->private early so crypt_dtr() can unwind on error */
	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	/*
	 * Per-request layout is: ablkcipher_request, tfm request context,
	 * then (aligned) struct dm_crypt_request followed by the IV.
	 */
	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& crypto_ablkcipher_alignmask(any_tfm(cc));
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
	}

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	/*
	 * Reserve per-bio space for dm_crypt_io plus an inline crypto
	 * request; crypt_map() carves the request out of this area.
	 */
	cc->per_bio_data_size = ti->per_bio_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		while (opt_params--) {
			opt_string = dm_shift_arg(&as);
			if (!opt_string) {
				ti->error = "Not enough feature arguments";
				goto bad;
			}

			if (!strcasecmp(opt_string, "allow_discards"))
				ti->num_discard_bios = 1;

			else if (!strcasecmp(opt_string, "same_cpu_crypt"))
				set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

			else {
				ti->error = "Invalid feature arguments";
				goto bad;
			}
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	/* same_cpu_crypt pins encryption to the submitting CPU's worker */
	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
1758
/*
 * Map a bio submitted to the crypt target.
 *
 * Empty flushes and discards are remapped straight to the underlying
 * device; everything else is handed to the kcryptd machinery and
 * completed asynchronously (DM_MAPIO_SUBMITTED).
 */
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/* Per-bio area sized via ti->per_bio_data_size in crypt_ctr() */
	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
	/* The crypto request lives directly behind struct dm_crypt_io */
	io->ctx.req = (struct ablkcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		/* Try a non-blocking read first; fall back to io workqueue */
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
1789
/*
 * Report target status.  STATUSTYPE_TABLE emits the constructor line
 * (including the hex key!) so "dmsetup table --showkeys" can round-trip;
 * STATUSTYPE_INFO has nothing to report.
 *
 * NOTE: DMEMIT() expands using the local names sz/result/maxlen.
 */
static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		/* "-" stands for an empty key */
		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		/* Feature args are preceded by their count */
		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
		}

		break;
	}
}
1827
Milan Broze48d4bb2006-10-03 01:15:37 -07001828static void crypt_postsuspend(struct dm_target *ti)
1829{
1830 struct crypt_config *cc = ti->private;
1831
1832 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1833}
1834
1835static int crypt_preresume(struct dm_target *ti)
1836{
1837 struct crypt_config *cc = ti->private;
1838
1839 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
1840 DMERR("aborting resume - crypt key is not set.");
1841 return -EAGAIN;
1842 }
1843
1844 return 0;
1845}
1846
1847static void crypt_resume(struct dm_target *ti)
1848{
1849 struct crypt_config *cc = ti->private;
1850
1851 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1852}
1853
1854/* Message interface
1855 * key set <key>
1856 * key wipe
1857 */
1858static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1859{
1860 struct crypt_config *cc = ti->private;
Milan Broz542da312009-12-10 23:51:57 +00001861 int ret = -EINVAL;
Milan Broze48d4bb2006-10-03 01:15:37 -07001862
1863 if (argc < 2)
1864 goto error;
1865
Mike Snitzer498f0102011-08-02 12:32:04 +01001866 if (!strcasecmp(argv[0], "key")) {
Milan Broze48d4bb2006-10-03 01:15:37 -07001867 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
1868 DMWARN("not suspended during key manipulation.");
1869 return -EINVAL;
1870 }
Mike Snitzer498f0102011-08-02 12:32:04 +01001871 if (argc == 3 && !strcasecmp(argv[1], "set")) {
Milan Broz542da312009-12-10 23:51:57 +00001872 ret = crypt_set_key(cc, argv[2]);
1873 if (ret)
1874 return ret;
1875 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
1876 ret = cc->iv_gen_ops->init(cc);
1877 return ret;
1878 }
Mike Snitzer498f0102011-08-02 12:32:04 +01001879 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
Milan Broz542da312009-12-10 23:51:57 +00001880 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
1881 ret = cc->iv_gen_ops->wipe(cc);
1882 if (ret)
1883 return ret;
1884 }
Milan Broze48d4bb2006-10-03 01:15:37 -07001885 return crypt_wipe_key(cc);
Milan Broz542da312009-12-10 23:51:57 +00001886 }
Milan Broze48d4bb2006-10-03 01:15:37 -07001887 }
1888
1889error:
1890 DMWARN("unrecognised message received.");
1891 return -EINVAL;
1892}
1893
Milan Brozd41e26b2008-07-21 12:00:40 +01001894static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
1895 struct bio_vec *biovec, int max_size)
1896{
1897 struct crypt_config *cc = ti->private;
1898 struct request_queue *q = bdev_get_queue(cc->dev->bdev);
1899
1900 if (!q->merge_bvec_fn)
1901 return max_size;
1902
1903 bvm->bi_bdev = cc->dev->bdev;
Alasdair G Kergonb441a2622010-08-12 04:14:11 +01001904 bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
Milan Brozd41e26b2008-07-21 12:00:40 +01001905
1906 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
1907}
1908
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001909static int crypt_iterate_devices(struct dm_target *ti,
1910 iterate_devices_callout_fn fn, void *data)
1911{
1912 struct crypt_config *cc = ti->private;
1913
Mike Snitzer5dea2712009-07-23 20:30:42 +01001914 return fn(ti, cc->dev, cc->start, ti->len, data);
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001915}
1916
/*
 * device-mapper target registration for "crypt".  The version triple is
 * reported to userspace (dmsetup targets) and is bumped whenever the
 * table syntax or feature set changes.
 */
static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};
1932
1933static int __init dm_crypt_init(void)
1934{
1935 int r;
1936
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001937 _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 if (!_crypt_io_pool)
1939 return -ENOMEM;
1940
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 r = dm_register_target(&crypt_target);
1942 if (r < 0) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001943 DMERR("register failed %d", r);
Milan Broz9934a8b2007-10-19 22:38:57 +01001944 kmem_cache_destroy(_crypt_io_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 }
1946
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 return r;
1948}
1949
/*
 * Module exit: unregister the target first so no new tables can be
 * built, then destroy the (now unused) io slab cache.
 */
static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}
1955
/* Standard module entry/exit hooks and metadata */
module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");