/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

120 /*
Milan Brozddd42ed2008-02-08 02:11:07 +0000121 * pool for per bio private data, crypto requests and
122 * encryption requeusts/buffer pages
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: http://www.truecrypt.org
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

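/*
 * Apply the TCW whitening to one 512-byte sector: the per-device
 * whitening value is XORed with the sector number, folded through
 * CRC32 into 8 bytes and then XORed over the whole sector.
 */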
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memset(buf, 0, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

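/*
 * Helpers to translate between an ablkcipher request, the dm_crypt_request
 * placed after it and the IV area, according to the request layout
 * described in struct crypt_config.
 */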
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

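/*
 * Encrypt or decrypt a single 512-byte sector: map the current input and
 * output bio_vecs into scatterlists, advance both iterators, generate the
 * IV (if an IV mode is configured) and run the ablkcipher request.
 */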
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

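/*
 * Allocate the ablkcipher request for this context on first use and bind
 * it to the tfm selected by the sector number (for multikey modes) and to
 * the async completion callback.
 */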
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

static void crypt_free_req(struct crypt_config *cc,
			   struct ablkcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct ablkcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through*/
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio.  The caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */
		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_iter.bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		crypt_free_req(cc, io->ctx.req, base_bio);
	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
		mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_iter.bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.iter_out = clone->bi_iter;

		remaining -= clone->bi_iter.bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);

		r = crypt_convert(cc, &io->ctx);
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
			crypt_io_init(new_io, io->cc, io->base_bio, sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.iter_in = io->ctx.iter_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

Milan Broz95497a92008-02-08 02:11:12 +00001310static void kcryptd_async_done(struct crypto_async_request *async_req,
1311 int error)
1312{
Huang Yingb2174ee2009-03-16 17:44:33 +00001313 struct dm_crypt_request *dmreq = async_req->data;
1314 struct convert_context *ctx = dmreq->ctx;
Milan Broz95497a92008-02-08 02:11:12 +00001315 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001316 struct crypt_config *cc = io->cc;
Milan Broz95497a92008-02-08 02:11:12 +00001317
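	/*
	 * -EINPROGRESS here means a request that was previously queued with
	 * -EBUSY has started processing; wake up crypt_convert(), which is
	 * waiting on ctx->restart before issuing the next request.
	 */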
1318 if (error == -EINPROGRESS) {
1319 complete(&ctx->restart);
1320 return;
1321 }
1322
Milan Broz2dc53272011-01-13 19:59:54 +00001323 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1324 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
1325
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001326 if (error < 0)
1327 io->error = -EIO;
1328
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04001329 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
Milan Broz95497a92008-02-08 02:11:12 +00001330
Mikulas Patocka40b62292012-07-27 15:08:04 +01001331 if (!atomic_dec_and_test(&ctx->cc_pending))
Milan Broz95497a92008-02-08 02:11:12 +00001332 return;
1333
1334 if (bio_data_dir(io->base_bio) == READ)
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001335 kcryptd_crypt_read_done(io);
Milan Broz95497a92008-02-08 02:11:12 +00001336 else
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001337 kcryptd_crypt_write_io_submit(io, 1);
Milan Broz95497a92008-02-08 02:11:12 +00001338}
1339
Milan Broz4e4eef62008-02-08 02:10:49 +00001340static void kcryptd_crypt(struct work_struct *work)
1341{
1342 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1343
1344 if (bio_data_dir(io->base_bio) == READ)
1345 kcryptd_crypt_read_convert(io);
1346 else
1347 kcryptd_crypt_write_convert(io);
Milan Broz8b004452006-10-03 01:15:37 -07001348}
1349
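/*
 * Hand the actual encryption/decryption work off to the per-device
 * crypt workqueue.
 */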
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001350static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1351{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001352 struct crypt_config *cc = io->cc;
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001353
1354 INIT_WORK(&io->work, kcryptd_crypt);
1355 queue_work(cc->crypt_queue, &io->work);
1356}
1357
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358/*
1359 * Decode key from its hex representation
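 * (e.g. the two-byte hex string "c0de" decodes to { 0xc0, 0xde })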
1360 */
1361static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
1362{
1363 char buffer[3];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 unsigned int i;
1365
1366 buffer[2] = '\0';
1367
Milan Broz8b004452006-10-03 01:15:37 -07001368 for (i = 0; i < size; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 buffer[0] = *hex++;
1370 buffer[1] = *hex++;
1371
majianpeng1a66a082012-07-27 15:07:59 +01001372 if (kstrtou8(buffer, 16, &key[i]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 return -EINVAL;
1374 }
1375
1376 if (*hex != '\0')
1377 return -EINVAL;
1378
1379 return 0;
1380}
1381
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001382static void crypt_free_tfms(struct crypt_config *cc)
Milan Brozd1f96422011-01-13 19:59:54 +00001383{
Milan Brozd1f96422011-01-13 19:59:54 +00001384 unsigned i;
1385
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001386 if (!cc->tfms)
1387 return;
1388
Milan Brozd1f96422011-01-13 19:59:54 +00001389 for (i = 0; i < cc->tfms_count; i++)
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001390 if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
1391 crypto_free_ablkcipher(cc->tfms[i]);
1392 cc->tfms[i] = NULL;
Milan Brozd1f96422011-01-13 19:59:54 +00001393 }
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001394
1395 kfree(cc->tfms);
1396 cc->tfms = NULL;
Milan Brozd1f96422011-01-13 19:59:54 +00001397}
1398
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001399static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
Milan Brozd1f96422011-01-13 19:59:54 +00001400{
Milan Brozd1f96422011-01-13 19:59:54 +00001401 unsigned i;
1402 int err;
1403
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001404 cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
1405 GFP_KERNEL);
1406 if (!cc->tfms)
1407 return -ENOMEM;
1408
Milan Brozd1f96422011-01-13 19:59:54 +00001409 for (i = 0; i < cc->tfms_count; i++) {
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001410 cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
1411 if (IS_ERR(cc->tfms[i])) {
1412 err = PTR_ERR(cc->tfms[i]);
1413 crypt_free_tfms(cc);
Milan Brozd1f96422011-01-13 19:59:54 +00001414 return err;
1415 }
1416 }
1417
1418 return 0;
1419}
1420
Andi Kleenc0297722011-01-13 19:59:53 +00001421static int crypt_setkey_allcpus(struct crypt_config *cc)
1422{
Milan Brozda31a072013-10-28 23:21:03 +01001423 unsigned subkey_size;
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001424 int err = 0, i, r;
Andi Kleenc0297722011-01-13 19:59:53 +00001425
Milan Brozda31a072013-10-28 23:21:03 +01001426 /* Ignore extra keys (which are used for IV etc) */
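	/* e.g. tfms_count == 2, a 64-byte key, no extra bytes -> 32-byte sub-keys */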
1427 subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
1428
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001429 for (i = 0; i < cc->tfms_count; i++) {
1430 r = crypto_ablkcipher_setkey(cc->tfms[i],
1431 cc->key + (i * subkey_size),
1432 subkey_size);
1433 if (r)
1434 err = r;
Andi Kleenc0297722011-01-13 19:59:53 +00001435 }
1436
1437 return err;
1438}
1439
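/*
 * Decode the hex key string supplied by userspace and program it into all
 * allocated tfms. A single "-" stands for an empty (zero-length) key.
 */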
Milan Broze48d4bb2006-10-03 01:15:37 -07001440static int crypt_set_key(struct crypt_config *cc, char *key)
1441{
Milan Brozde8be5a2011-03-24 13:54:27 +00001442 int r = -EINVAL;
1443 int key_string_len = strlen(key);
1444
Milan Broz69a8cfc2011-01-13 19:59:49 +00001445 /* The key size may not be changed. */
Milan Brozde8be5a2011-03-24 13:54:27 +00001446 if (cc->key_size != (key_string_len >> 1))
1447 goto out;
Milan Broze48d4bb2006-10-03 01:15:37 -07001448
Milan Broz69a8cfc2011-01-13 19:59:49 +00001449 /* Hyphen (which gives a key_size of zero) means there is no key. */
1450 if (!cc->key_size && strcmp(key, "-"))
Milan Brozde8be5a2011-03-24 13:54:27 +00001451 goto out;
Milan Broze48d4bb2006-10-03 01:15:37 -07001452
Milan Broz69a8cfc2011-01-13 19:59:49 +00001453 if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
Milan Brozde8be5a2011-03-24 13:54:27 +00001454 goto out;
Milan Broze48d4bb2006-10-03 01:15:37 -07001455
1456 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1457
Milan Brozde8be5a2011-03-24 13:54:27 +00001458 r = crypt_setkey_allcpus(cc);
1459
1460out:
1461 /* Hex key string not needed after here, so wipe it. */
1462 memset(key, '0', key_string_len);
1463
1464 return r;
Milan Broze48d4bb2006-10-03 01:15:37 -07001465}
1466
1467static int crypt_wipe_key(struct crypt_config *cc)
1468{
1469 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1470 memset(&cc->key, 0, cc->key_size * sizeof(u8));
Andi Kleenc0297722011-01-13 19:59:53 +00001471
1472 return crypt_setkey_allcpus(cc);
Milan Broze48d4bb2006-10-03 01:15:37 -07001473}
1474
Milan Broz28513fc2010-08-12 04:14:06 +01001475static void crypt_dtr(struct dm_target *ti)
1476{
1477 struct crypt_config *cc = ti->private;
1478
1479 ti->private = NULL;
1480
1481 if (!cc)
1482 return;
1483
1484 if (cc->io_queue)
1485 destroy_workqueue(cc->io_queue);
1486 if (cc->crypt_queue)
1487 destroy_workqueue(cc->crypt_queue);
1488
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001489 crypt_free_tfms(cc);
1490
Milan Broz28513fc2010-08-12 04:14:06 +01001491 if (cc->bs)
1492 bioset_free(cc->bs);
1493
1494 if (cc->page_pool)
1495 mempool_destroy(cc->page_pool);
1496 if (cc->req_pool)
1497 mempool_destroy(cc->req_pool);
1498 if (cc->io_pool)
1499 mempool_destroy(cc->io_pool);
1500
1501 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1502 cc->iv_gen_ops->dtr(cc);
1503
Milan Broz28513fc2010-08-12 04:14:06 +01001504 if (cc->dev)
1505 dm_put_device(ti, cc->dev);
1506
Milan Broz5ebaee62010-08-12 04:14:07 +01001507 kzfree(cc->cipher);
Milan Broz7dbcd132011-01-13 19:59:52 +00001508 kzfree(cc->cipher_string);
Milan Broz28513fc2010-08-12 04:14:06 +01001509
1510 /* Must zero key material before freeing */
1511 kzfree(cc);
1512}
1513
Milan Broz5ebaee62010-08-12 04:14:07 +01001514static int crypt_ctr_cipher(struct dm_target *ti,
1515 char *cipher_in, char *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516{
Milan Broz5ebaee62010-08-12 04:14:07 +01001517 struct crypt_config *cc = ti->private;
Milan Brozd1f96422011-01-13 19:59:54 +00001518 char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
Milan Broz5ebaee62010-08-12 04:14:07 +01001519 char *cipher_api = NULL;
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001520 int ret = -EINVAL;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001521 char dummy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Milan Broz5ebaee62010-08-12 04:14:07 +01001523 /* Convert to crypto api definition? */
1524 if (strchr(cipher_in, '(')) {
1525 ti->error = "Bad cipher specification";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 return -EINVAL;
1527 }
1528
Milan Broz7dbcd132011-01-13 19:59:52 +00001529 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
1530 if (!cc->cipher_string)
1531 goto bad_mem;
1532
Milan Broz5ebaee62010-08-12 04:14:07 +01001533 /*
1534 * Legacy dm-crypt cipher specification
Milan Brozd1f96422011-01-13 19:59:54 +00001535 * cipher[:keycount]-mode-iv:ivopts
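	 * e.g. "aes-cbc-essiv:sha256" or "aes-xts-plain64" (illustrative only)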
Milan Broz5ebaee62010-08-12 04:14:07 +01001536 */
1537 tmp = cipher_in;
Milan Brozd1f96422011-01-13 19:59:54 +00001538 keycount = strsep(&tmp, "-");
1539 cipher = strsep(&keycount, ":");
1540
1541 if (!keycount)
1542 cc->tfms_count = 1;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001543 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
Milan Brozd1f96422011-01-13 19:59:54 +00001544 !is_power_of_2(cc->tfms_count)) {
1545 ti->error = "Bad cipher key count specification";
1546 return -EINVAL;
1547 }
1548 cc->key_parts = cc->tfms_count;
Milan Brozda31a072013-10-28 23:21:03 +01001549 cc->key_extra_size = 0;
Milan Broz5ebaee62010-08-12 04:14:07 +01001550
1551 cc->cipher = kstrdup(cipher, GFP_KERNEL);
1552 if (!cc->cipher)
1553 goto bad_mem;
1554
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 chainmode = strsep(&tmp, "-");
1556 ivopts = strsep(&tmp, "-");
1557 ivmode = strsep(&ivopts, ":");
1558
1559 if (tmp)
Milan Broz5ebaee62010-08-12 04:14:07 +01001560 DMWARN("Ignoring unexpected additional cipher options");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Milan Broz7dbcd132011-01-13 19:59:52 +00001562 /*
1563 * For compatibility with the original dm-crypt mapping format, if
1564 * only the cipher name is supplied, use cbc-plain.
1565 */
Milan Broz5ebaee62010-08-12 04:14:07 +01001566 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 chainmode = "cbc";
1568 ivmode = "plain";
1569 }
1570
Herbert Xud1806f62006-08-22 20:29:17 +10001571 if (strcmp(chainmode, "ecb") && !ivmode) {
Milan Broz5ebaee62010-08-12 04:14:07 +01001572 ti->error = "IV mechanism required";
1573 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 }
1575
Milan Broz5ebaee62010-08-12 04:14:07 +01001576 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
1577 if (!cipher_api)
1578 goto bad_mem;
1579
1580 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
1581 "%s(%s)", chainmode, cipher);
1582 if (ret < 0) {
1583 kfree(cipher_api);
1584 goto bad_mem;
Herbert Xud1806f62006-08-22 20:29:17 +10001585 }
1586
Milan Broz5ebaee62010-08-12 04:14:07 +01001587 /* Allocate cipher */
Mikulas Patockafd2d2312012-07-27 15:08:05 +01001588 ret = crypt_alloc_tfms(cc, cipher_api);
1589 if (ret < 0) {
1590 ti->error = "Error allocating crypto tfm";
1591 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593
Milan Broz5ebaee62010-08-12 04:14:07 +01001594 /* Initialize IV */
Andi Kleenc0297722011-01-13 19:59:53 +00001595 cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
Milan Broz5ebaee62010-08-12 04:14:07 +01001596 if (cc->iv_size)
1597 /* at least a 64 bit sector number should fit in our buffer */
1598 cc->iv_size = max(cc->iv_size,
1599 (unsigned int)(sizeof(u64) / sizeof(u8)));
1600 else if (ivmode) {
1601 DMWARN("Selected cipher does not support IVs");
1602 ivmode = NULL;
1603 }
1604
1605 /* Choose ivmode, see comments at iv code. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 if (ivmode == NULL)
1607 cc->iv_gen_ops = NULL;
1608 else if (strcmp(ivmode, "plain") == 0)
1609 cc->iv_gen_ops = &crypt_iv_plain_ops;
Milan Broz61afef62009-12-10 23:52:25 +00001610 else if (strcmp(ivmode, "plain64") == 0)
1611 cc->iv_gen_ops = &crypt_iv_plain64_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 else if (strcmp(ivmode, "essiv") == 0)
1613 cc->iv_gen_ops = &crypt_iv_essiv_ops;
Rik Snel48527fa2006-09-03 08:56:39 +10001614 else if (strcmp(ivmode, "benbi") == 0)
1615 cc->iv_gen_ops = &crypt_iv_benbi_ops;
Ludwig Nussel46b47732007-05-09 02:32:55 -07001616 else if (strcmp(ivmode, "null") == 0)
1617 cc->iv_gen_ops = &crypt_iv_null_ops;
Milan Broz34745782011-01-13 19:59:55 +00001618 else if (strcmp(ivmode, "lmk") == 0) {
1619 cc->iv_gen_ops = &crypt_iv_lmk_ops;
Milan Brozed04d982013-10-28 23:21:04 +01001620 /*
		 * Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
1623 * If present (version 3), last key is used as IV seed.
Milan Brozed04d982013-10-28 23:21:04 +01001624 * All keys (including IV seed) are always the same size.
Milan Broz34745782011-01-13 19:59:55 +00001625 */
Milan Brozda31a072013-10-28 23:21:03 +01001626 if (cc->key_size % cc->key_parts) {
Milan Broz34745782011-01-13 19:59:55 +00001627 cc->key_parts++;
Milan Brozda31a072013-10-28 23:21:03 +01001628 cc->key_extra_size = cc->key_size / cc->key_parts;
1629 }
Milan Brozed04d982013-10-28 23:21:04 +01001630 } else if (strcmp(ivmode, "tcw") == 0) {
1631 cc->iv_gen_ops = &crypt_iv_tcw_ops;
1632 cc->key_parts += 2; /* IV + whitening */
1633 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
Milan Broz34745782011-01-13 19:59:55 +00001634 } else {
Milan Broz5ebaee62010-08-12 04:14:07 +01001635 ret = -EINVAL;
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001636 ti->error = "Invalid IV mode";
Milan Broz28513fc2010-08-12 04:14:06 +01001637 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 }
1639
Milan Brozda31a072013-10-28 23:21:03 +01001640 /* Initialize and set key */
1641 ret = crypt_set_key(cc, key);
1642 if (ret < 0) {
1643 ti->error = "Error decoding and setting key";
1644 goto bad;
1645 }
1646
Milan Broz28513fc2010-08-12 04:14:06 +01001647 /* Allocate IV */
1648 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
1649 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
1650 if (ret < 0) {
1651 ti->error = "Error creating IV";
1652 goto bad;
1653 }
Milan Brozb95bf2d2009-12-10 23:51:56 +00001654 }
1655
Milan Broz28513fc2010-08-12 04:14:06 +01001656 /* Initialize IV (set keys for ESSIV etc) */
1657 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
1658 ret = cc->iv_gen_ops->init(cc);
1659 if (ret < 0) {
1660 ti->error = "Error initialising IV";
1661 goto bad;
1662 }
1663 }
1664
Milan Broz5ebaee62010-08-12 04:14:07 +01001665 ret = 0;
1666bad:
1667 kfree(cipher_api);
1668 return ret;
1669
1670bad_mem:
1671 ti->error = "Cannot allocate cipher strings";
1672 return -ENOMEM;
1673}
1674
1675/*
1676 * Construct an encryption mapping:
1677 * <cipher> <key> <iv_offset> <dev_path> <start>
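 *
 * e.g. (illustrative): aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb 0
 * optionally followed by the feature arguments "1 allow_discards"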
1678 */
1679static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1680{
1681 struct crypt_config *cc;
Milan Broz772ae5f2011-08-02 12:32:08 +01001682 unsigned int key_size, opt_params;
Milan Broz5ebaee62010-08-12 04:14:07 +01001683 unsigned long long tmpll;
1684 int ret;
Mikulas Patockad49ec522014-08-28 11:09:31 -04001685 size_t iv_size_padding;
Milan Broz772ae5f2011-08-02 12:32:08 +01001686 struct dm_arg_set as;
1687 const char *opt_string;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001688 char dummy;
Milan Broz5ebaee62010-08-12 04:14:07 +01001689
Milan Broz772ae5f2011-08-02 12:32:08 +01001690 static struct dm_arg _args[] = {
1691 {0, 1, "Invalid number of feature args"},
1692 };
1693
1694 if (argc < 5) {
Milan Broz5ebaee62010-08-12 04:14:07 +01001695 ti->error = "Not enough arguments";
1696 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 }
1698
Milan Broz5ebaee62010-08-12 04:14:07 +01001699 key_size = strlen(argv[1]) >> 1;
1700
1701 cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
1702 if (!cc) {
1703 ti->error = "Cannot allocate encryption context";
1704 return -ENOMEM;
1705 }
Milan Broz69a8cfc2011-01-13 19:59:49 +00001706 cc->key_size = key_size;
Milan Broz5ebaee62010-08-12 04:14:07 +01001707
1708 ti->private = cc;
1709 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
1710 if (ret < 0)
1711 goto bad;
1712
Milan Broz28513fc2010-08-12 04:14:06 +01001713 ret = -ENOMEM;
Matthew Dobson93d23412006-03-26 01:37:50 -08001714 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 if (!cc->io_pool) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001716 ti->error = "Cannot allocate crypt io mempool";
Milan Broz28513fc2010-08-12 04:14:06 +01001717 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 }
1719
Milan Brozddd42ed2008-02-08 02:11:07 +00001720 cc->dmreq_start = sizeof(struct ablkcipher_request);
Andi Kleenc0297722011-01-13 19:59:53 +00001721 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
Mikulas Patockad49ec522014-08-28 11:09:31 -04001722 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
1723
1724 if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
1725 /* Allocate the padding exactly */
1726 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
1727 & crypto_ablkcipher_alignmask(any_tfm(cc));
1728 } else {
1729 /*
1730 * If the cipher requires greater alignment than kmalloc
1731 * alignment, we don't know the exact position of the
1732 * initialization vector. We must assume worst case.
1733 */
1734 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
1735 }
Milan Brozddd42ed2008-02-08 02:11:07 +00001736
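	/*
	 * Each element of the request mempool is laid out roughly as
	 *   | ablkcipher_request | tfm request ctx | pad | dm_crypt_request | pad | IV |
	 * with cc->dmreq_start covering everything before dm_crypt_request.
	 */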
1737 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
Mikulas Patockad49ec522014-08-28 11:09:31 -04001738 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
Milan Brozddd42ed2008-02-08 02:11:07 +00001739 if (!cc->req_pool) {
1740 ti->error = "Cannot allocate crypt request mempool";
Milan Broz28513fc2010-08-12 04:14:06 +01001741 goto bad;
Milan Brozddd42ed2008-02-08 02:11:07 +00001742 }
Milan Brozddd42ed2008-02-08 02:11:07 +00001743
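	/*
	 * The dm_crypt_io and its first request (including the IV) are carved
	 * out of the per-bio data set up in crypt_map(), so the common
	 * single-fragment case needs no mempool allocation at all.
	 */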
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04001744 cc->per_bio_data_size = ti->per_bio_data_size =
Mikulas Patockad49ec522014-08-28 11:09:31 -04001745 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
1746 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
1747 ARCH_KMALLOC_MINALIGN);
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04001748
Matthew Dobsona19b27c2006-03-26 01:37:45 -08001749 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 if (!cc->page_pool) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001751 ti->error = "Cannot allocate page mempool";
Milan Broz28513fc2010-08-12 04:14:06 +01001752 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 }
1754
Jens Axboebb799ca2008-12-10 15:35:05 +01001755 cc->bs = bioset_create(MIN_IOS, 0);
Milan Broz6a24c712006-10-03 01:15:40 -07001756 if (!cc->bs) {
1757 ti->error = "Cannot allocate crypt bioset";
Milan Broz28513fc2010-08-12 04:14:06 +01001758 goto bad;
Milan Broz6a24c712006-10-03 01:15:40 -07001759 }
1760
Milan Broz28513fc2010-08-12 04:14:06 +01001761 ret = -EINVAL;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001762 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001763 ti->error = "Invalid iv_offset sector";
Milan Broz28513fc2010-08-12 04:14:06 +01001764 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 }
Andrew Morton4ee218c2006-03-27 01:17:48 -08001766 cc->iv_offset = tmpll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
Milan Broz28513fc2010-08-12 04:14:06 +01001768 if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
1769 ti->error = "Device lookup failed";
1770 goto bad;
1771 }
1772
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001773 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001774 ti->error = "Invalid device sector";
Milan Broz28513fc2010-08-12 04:14:06 +01001775 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 }
Andrew Morton4ee218c2006-03-27 01:17:48 -08001777 cc->start = tmpll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778
Milan Broz772ae5f2011-08-02 12:32:08 +01001779 argv += 5;
1780 argc -= 5;
1781
1782 /* Optional parameters */
1783 if (argc) {
1784 as.argc = argc;
1785 as.argv = argv;
1786
1787 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1788 if (ret)
1789 goto bad;
1790
1791 opt_string = dm_shift_arg(&as);
1792
1793 if (opt_params == 1 && opt_string &&
1794 !strcasecmp(opt_string, "allow_discards"))
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001795 ti->num_discard_bios = 1;
Milan Broz772ae5f2011-08-02 12:32:08 +01001796 else if (opt_params) {
1797 ret = -EINVAL;
1798 ti->error = "Invalid feature arguments";
1799 goto bad;
1800 }
1801 }
1802
Milan Broz28513fc2010-08-12 04:14:06 +01001803 ret = -ENOMEM;
Tejun Heo670368a2013-07-30 08:40:21 -04001804 cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
Milan Brozcabf08e2007-10-19 22:38:58 +01001805 if (!cc->io_queue) {
1806 ti->error = "Couldn't create kcryptd io queue";
Milan Broz28513fc2010-08-12 04:14:06 +01001807 goto bad;
Milan Brozcabf08e2007-10-19 22:38:58 +01001808 }
1809
Andi Kleenc0297722011-01-13 19:59:53 +00001810 cc->crypt_queue = alloc_workqueue("kcryptd",
Tejun Heo670368a2013-07-30 08:40:21 -04001811 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
Milan Brozcabf08e2007-10-19 22:38:58 +01001812 if (!cc->crypt_queue) {
Milan Broz9934a8b2007-10-19 22:38:57 +01001813 ti->error = "Couldn't create kcryptd queue";
Milan Broz28513fc2010-08-12 04:14:06 +01001814 goto bad;
Milan Broz9934a8b2007-10-19 22:38:57 +01001815 }
1816
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001817 ti->num_flush_bios = 1;
Alasdair G Kergon0ac55482012-07-27 15:08:08 +01001818 ti->discard_zeroes_data_unsupported = true;
Milan Broz983c7db2011-09-25 23:26:21 +01001819
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 return 0;
1821
Milan Broz28513fc2010-08-12 04:14:06 +01001822bad:
1823 crypt_dtr(ti);
1824 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825}
1826
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001827static int crypt_map(struct dm_target *ti, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001829 struct dm_crypt_io *io;
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001830 struct crypt_config *cc = ti->private;
Mikulas Patocka647c7db2009-06-22 10:12:23 +01001831
Milan Broz772ae5f2011-08-02 12:32:08 +01001832 /*
1833 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
1834 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
1835 * - for REQ_DISCARD caller must use flush if IO ordering matters
1836 */
1837 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
Mikulas Patocka647c7db2009-06-22 10:12:23 +01001838 bio->bi_bdev = cc->dev->bdev;
Milan Broz772ae5f2011-08-02 12:32:08 +01001839 if (bio_sectors(bio))
Kent Overstreet4f024f32013-10-11 15:44:27 -07001840 bio->bi_iter.bi_sector = cc->start +
1841 dm_target_offset(ti, bio->bi_iter.bi_sector);
Mikulas Patocka647c7db2009-06-22 10:12:23 +01001842 return DM_MAPIO_REMAPPED;
1843 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04001845 io = dm_per_bio_data(bio, cc->per_bio_data_size);
1846 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
1847 io->ctx.req = (struct ablkcipher_request *)(io + 1);
Milan Brozcabf08e2007-10-19 22:38:58 +01001848
Milan Broz20c82532011-01-13 19:59:53 +00001849 if (bio_data_dir(io->base_bio) == READ) {
1850 if (kcryptd_io_read(io, GFP_NOWAIT))
1851 kcryptd_queue_io(io);
1852 } else
Milan Brozcabf08e2007-10-19 22:38:58 +01001853 kcryptd_queue_crypt(io);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001855 return DM_MAPIO_SUBMITTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856}
1857
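/*
 * For STATUSTYPE_TABLE, report the mapping in the same format the
 * constructor accepts, e.g.
 * "aes-cbc-essiv:sha256 <hex key> <iv_offset> <device> <start> [1 allow_discards]".
 */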
Mikulas Patockafd7c0922013-03-01 22:45:44 +00001858static void crypt_status(struct dm_target *ti, status_type_t type,
1859 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860{
Milan Broz5ebaee62010-08-12 04:14:07 +01001861 struct crypt_config *cc = ti->private;
Mikulas Patockafd7c0922013-03-01 22:45:44 +00001862 unsigned i, sz = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
1864 switch (type) {
1865 case STATUSTYPE_INFO:
1866 result[0] = '\0';
1867 break;
1868
1869 case STATUSTYPE_TABLE:
Milan Broz7dbcd132011-01-13 19:59:52 +00001870 DMEMIT("%s ", cc->cipher_string);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871
Mikulas Patockafd7c0922013-03-01 22:45:44 +00001872 if (cc->key_size > 0)
1873 for (i = 0; i < cc->key_size; i++)
1874 DMEMIT("%02x", cc->key[i]);
1875 else
1876 DMEMIT("-");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877
Andrew Morton4ee218c2006-03-27 01:17:48 -08001878 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1879 cc->dev->name, (unsigned long long)cc->start);
Milan Broz772ae5f2011-08-02 12:32:08 +01001880
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001881 if (ti->num_discard_bios)
Milan Broz772ae5f2011-08-02 12:32:08 +01001882 DMEMIT(" 1 allow_discards");
1883
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 break;
1885 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886}
1887
Milan Broze48d4bb2006-10-03 01:15:37 -07001888static void crypt_postsuspend(struct dm_target *ti)
1889{
1890 struct crypt_config *cc = ti->private;
1891
1892 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1893}
1894
1895static int crypt_preresume(struct dm_target *ti)
1896{
1897 struct crypt_config *cc = ti->private;
1898
1899 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
1900 DMERR("aborting resume - crypt key is not set.");
1901 return -EAGAIN;
1902 }
1903
1904 return 0;
1905}
1906
1907static void crypt_resume(struct dm_target *ti)
1908{
1909 struct crypt_config *cc = ti->private;
1910
1911 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1912}
1913
1914/* Message interface
1915 * key set <key>
1916 * key wipe
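 *
 * e.g. "dmsetup message <dev_name> 0 key wipe", issued while the device
 * is suspended.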
1917 */
1918static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1919{
1920 struct crypt_config *cc = ti->private;
Milan Broz542da312009-12-10 23:51:57 +00001921 int ret = -EINVAL;
Milan Broze48d4bb2006-10-03 01:15:37 -07001922
1923 if (argc < 2)
1924 goto error;
1925
Mike Snitzer498f0102011-08-02 12:32:04 +01001926 if (!strcasecmp(argv[0], "key")) {
Milan Broze48d4bb2006-10-03 01:15:37 -07001927 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
1928 DMWARN("not suspended during key manipulation.");
1929 return -EINVAL;
1930 }
Mike Snitzer498f0102011-08-02 12:32:04 +01001931 if (argc == 3 && !strcasecmp(argv[1], "set")) {
Milan Broz542da312009-12-10 23:51:57 +00001932 ret = crypt_set_key(cc, argv[2]);
1933 if (ret)
1934 return ret;
1935 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
1936 ret = cc->iv_gen_ops->init(cc);
1937 return ret;
1938 }
Mike Snitzer498f0102011-08-02 12:32:04 +01001939 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
Milan Broz542da312009-12-10 23:51:57 +00001940 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
1941 ret = cc->iv_gen_ops->wipe(cc);
1942 if (ret)
1943 return ret;
1944 }
Milan Broze48d4bb2006-10-03 01:15:37 -07001945 return crypt_wipe_key(cc);
Milan Broz542da312009-12-10 23:51:57 +00001946 }
Milan Broze48d4bb2006-10-03 01:15:37 -07001947 }
1948
1949error:
1950 DMWARN("unrecognised message received.");
1951 return -EINVAL;
1952}
1953
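/*
 * Constrain bio merging: remap the sector onto the backing device and, if
 * that device's queue has a merge_bvec_fn, defer to its answer.
 */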
Milan Brozd41e26b2008-07-21 12:00:40 +01001954static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
1955 struct bio_vec *biovec, int max_size)
1956{
1957 struct crypt_config *cc = ti->private;
1958 struct request_queue *q = bdev_get_queue(cc->dev->bdev);
1959
1960 if (!q->merge_bvec_fn)
1961 return max_size;
1962
1963 bvm->bi_bdev = cc->dev->bdev;
Alasdair G Kergonb441a2622010-08-12 04:14:11 +01001964 bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
Milan Brozd41e26b2008-07-21 12:00:40 +01001965
1966 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
1967}
1968
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001969static int crypt_iterate_devices(struct dm_target *ti,
1970 iterate_devices_callout_fn fn, void *data)
1971{
1972 struct crypt_config *cc = ti->private;
1973
Mike Snitzer5dea2712009-07-23 20:30:42 +01001974 return fn(ti, cc->dev, cc->start, ti->len, data);
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001975}
1976
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977static struct target_type crypt_target = {
1978 .name = "crypt",
Milan Brozed04d982013-10-28 23:21:04 +01001979 .version = {1, 13, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 .module = THIS_MODULE,
1981 .ctr = crypt_ctr,
1982 .dtr = crypt_dtr,
1983 .map = crypt_map,
1984 .status = crypt_status,
Milan Broze48d4bb2006-10-03 01:15:37 -07001985 .postsuspend = crypt_postsuspend,
1986 .preresume = crypt_preresume,
1987 .resume = crypt_resume,
1988 .message = crypt_message,
Milan Brozd41e26b2008-07-21 12:00:40 +01001989 .merge = crypt_merge,
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001990 .iterate_devices = crypt_iterate_devices,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991};
1992
1993static int __init dm_crypt_init(void)
1994{
1995 int r;
1996
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001997 _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 if (!_crypt_io_pool)
1999 return -ENOMEM;
2000
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 r = dm_register_target(&crypt_target);
2002 if (r < 0) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002003 DMERR("register failed %d", r);
Milan Broz9934a8b2007-10-19 22:38:57 +01002004 kmem_cache_destroy(_crypt_io_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 }
2006
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 return r;
2008}
2009
2010static void __exit dm_crypt_exit(void)
2011{
Mikulas Patocka10d3bd02009-01-06 03:04:58 +00002012 dm_unregister_target(&crypt_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 kmem_cache_destroy(_crypt_io_pool);
2014}
2015
2016module_init(dm_crypt_init);
2017module_exit(dm_crypt_exit);
2018
Jana Saoutbf142992014-06-24 14:27:04 -04002019MODULE_AUTHOR("Jana Saout <jana@saout.de>");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
2021MODULE_LICENSE("GPL");