/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/device-mapper.h>
#include <linux/printk.h>
#include <linux/pft.h>

#include <crypto/scatterwalk.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <mach/qcrypto.h>

#define DM_MSG_PREFIX "req-crypt"

#define MAX_SG_LIST 1024
#define REQ_DM_512_KB (512*1024)
#define MAX_ENCRYPTION_BUFFERS 1
#define MIN_IOS 16
#define MIN_POOL_PAGES 32
#define KEY_SIZE_XTS 64
#define AES_XTS_IV_LEN 16

#define DM_REQ_CRYPT_ERROR -1
#define DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC -2

struct req_crypt_result {
        struct completion completion;
        int err;
};

#define FDE_KEY_ID 0
#define PFE_KEY_ID 1

static struct dm_dev *dev;
static struct kmem_cache *_req_crypt_io_pool;
static sector_t start_sector_orig;
static struct workqueue_struct *req_crypt_queue;
static mempool_t *req_io_pool;
static mempool_t *req_page_pool;
static bool is_fde_enabled;
static struct crypto_ablkcipher *tfm;

struct req_dm_crypt_io {
        struct work_struct work;
        struct request *cloned_request;
        int error;
        atomic_t pending;
        struct timespec start_time;
        bool should_encrypt;
        bool should_decrypt;
        u32 key_id;
};

static void req_crypt_cipher_complete
                (struct crypto_async_request *req, int err);

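/*
 * Decide whether a cloned write request needs encryption. If the PFT layer
 * reports the bio as belonging to an encrypted (or in-place) file, the
 * per-file key is selected; otherwise the full-disk key is used whenever
 * FDE is enabled.
 */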
static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req)
{
        int ret;
        bool should_encrypt = false;
        struct bio *bio = NULL;
        u32 key_id = 0;
        bool is_encrypted = false;
        bool is_inplace = false;

        if (!req || !req->cloned_request || !req->cloned_request->bio)
                return false;

        bio = req->cloned_request->bio;

        ret = pft_get_key_index(bio, &key_id, &is_encrypted, &is_inplace);
        /* req->key_id = key_id; @todo support more than 1 pfe key */
        if ((ret == 0) && (is_encrypted || is_inplace)) {
                should_encrypt = true;
                req->key_id = PFE_KEY_ID;
        } else if (is_fde_enabled) {
                should_encrypt = true;
                req->key_id = FDE_KEY_ID;
        }

        return should_encrypt;
}

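/*
 * Decide whether a cloned read request needs decryption. Data tagged by the
 * PFT layer as encrypted (and not in-place) is decrypted with the per-file
 * key; otherwise the full-disk key is used whenever FDE is enabled.
 */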
static bool req_crypt_should_deccrypt(struct req_dm_crypt_io *req)
{
        int ret;
        bool should_deccrypt = false;
        struct bio *bio = NULL;
        u32 key_id = 0;
        bool is_encrypted = false;
        bool is_inplace = false;

        if (!req || !req->cloned_request || !req->cloned_request->bio)
                return false;

        bio = req->cloned_request->bio;

        ret = pft_get_key_index(bio, &key_id, &is_encrypted, &is_inplace);
        /* req->key_id = key_id; @todo support more than 1 pfe key */
        if ((ret == 0) && (is_encrypted && !is_inplace)) {
                should_deccrypt = true;
                req->key_id = PFE_KEY_ID;
        } else if (is_fde_enabled) {
                should_deccrypt = true;
                req->key_id = FDE_KEY_ID;
        }

        return should_deccrypt;
}

static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
{
        atomic_inc(&io->pending);
}

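/*
 * Completion helper for the write (encryption) path: on success the
 * encrypted clone is dispatched to the underlying queue, on error the
 * unmapped request is killed and the io is returned to its mempool.
 */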
static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io)
{
        int error = 0;
        struct request *clone = NULL;

        if (io) {
                error = io->error;
                if (io->cloned_request) {
                        clone = io->cloned_request;
                } else {
                        DMERR("%s io->cloned_request is NULL\n",
                                                        __func__);
                        /*
                         * If the clone is NULL we cannot do anything,
                         * this should never happen
                         */
                        BUG();
                }
        } else {
                DMERR("%s io is NULL\n", __func__);
                /*
                 * If io is NULL we cannot do anything,
                 * this should never happen
                 */
                BUG();
        }

        atomic_dec(&io->pending);

        if (error < 0) {
                dm_kill_unmapped_request(clone, error);
                mempool_free(io, req_io_pool);
        } else
                dm_dispatch_request(clone);
}

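/*
 * Completion helper for the read (decryption) path: the original request is
 * ended with the final status and the io is returned to its mempool.
 */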
static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io)
{
        int error = 0;
        struct request *clone = NULL;

        if (io) {
                error = io->error;
                if (io->cloned_request) {
                        clone = io->cloned_request;
                } else {
                        DMERR("%s io->cloned_request is NULL\n",
                                                        __func__);
                        /*
                         * If the clone is NULL we cannot do anything,
                         * this should never happen
                         */
                        BUG();
                }
        } else {
                DMERR("%s io is NULL\n",
                                        __func__);
                /*
                 * If io is NULL we cannot do anything,
                 * this should never happen
                 */
                BUG();
        }

        /* Should never get here if io or the clone is NULL */
        dm_end_request(clone, error);
        atomic_dec(&io->pending);
        mempool_free(io, req_io_pool);
}

/*
 * The callback that will be called by the worker queue to perform decryption
 * for reads and use the dm functions to complete the bios and requests.
 */
static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
{
        struct request *clone = NULL;
        int error = 0;
        int total_sg_len = 0, rc = 0, total_bytes_in_req = 0;
        struct ablkcipher_request *req = NULL;
        struct req_crypt_result result;
        struct scatterlist *req_sg_read = NULL;
        int err = 0;
        u8 IV[AES_XTS_IV_LEN];

        if (io) {
                error = io->error;
                if (io->cloned_request) {
                        clone = io->cloned_request;
                } else {
                        DMERR("%s io->cloned_request is NULL\n",
                                                        __func__);
                        error = DM_REQ_CRYPT_ERROR;
                        goto submit_request;
                }
        } else {
                DMERR("%s io is NULL\n",
                                        __func__);
                error = DM_REQ_CRYPT_ERROR;
                goto submit_request;
        }

        req_crypt_inc_pending(io);

        if (error != 0) {
                err = error;
                goto submit_request;
        }

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                DMERR("%s ablkcipher request allocation failed\n", __func__);
                err = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }

        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                req_crypt_cipher_complete, &result);
        init_completion(&result.completion);
        err = qcrypto_cipher_set_device(req, io->key_id);
        if (err != 0) {
                DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
                                __func__, err);
                error = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }
        qcrypto_cipher_set_flag(req,
                QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
        crypto_ablkcipher_clear_flags(tfm, ~0);
        crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);

        req_sg_read = kzalloc(sizeof(struct scatterlist) *
                        MAX_SG_LIST, GFP_KERNEL);
        if (!req_sg_read) {
                DMERR("%s req_sg_read allocation failed\n",
                                __func__);
                err = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }

        total_sg_len = blk_rq_map_sg(clone->q, clone, req_sg_read);
        if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
                DMERR("%s Request Error %d", __func__, total_sg_len);
                err = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }

        total_bytes_in_req = clone->__data_len;
        if (total_bytes_in_req > REQ_DM_512_KB) {
                DMERR("%s total_bytes_in_req > 512 KB %d",
                                __func__, total_bytes_in_req);
                err = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }

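        /*
         * The XTS IV/tweak for the request is derived from the clone's
         * starting sector: only sizeof(sector_t) bytes of the 16-byte IV
         * are filled in, the rest stay zero. QCRYPTO_CTX_XTS_DU_SIZE_512B
         * (set above) requests a 512-byte XTS data-unit size from the
         * crypto driver.
         */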
        memset(IV, 0, AES_XTS_IV_LEN);
        memcpy(IV, &clone->__sector, sizeof(sector_t));

        ablkcipher_request_set_crypt(req, req_sg_read, req_sg_read,
                        total_bytes_in_req, (void *) IV);

        rc = crypto_ablkcipher_decrypt(req);

        switch (rc) {
        case 0:
                break;

        case -EBUSY:
                /*
                 * Let's make this a synchronous request by waiting on
                 * in progress as well
                 */
        case -EINPROGRESS:
                wait_for_completion_io(&result.completion);
                if (result.err) {
                        DMERR("%s error = %d decrypting the request\n",
                                 __func__, result.err);
                        err = DM_REQ_CRYPT_ERROR;
                }
                break;

        default:
                err = DM_REQ_CRYPT_ERROR;
                break;
        }

ablkcipher_req_alloc_failure:

        if (req)
                ablkcipher_request_free(req);

        kfree(req_sg_read);

submit_request:
        if (io)
                io->error = err;
        req_crypt_dec_pending_decrypt(io);
}

/*
 * This callback is called by the worker queue to complete reads that need
 * no decryption, using the dm functions to complete the bios and requests.
 */
static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io)
{
        struct request *clone = NULL;
        int error = 0;

        if (!io || !io->cloned_request) {
                DMERR("%s io is invalid\n", __func__);
                BUG(); /* should not happen */
        }

        clone = io->cloned_request;

        dm_end_request(clone, error);
        mempool_free(io, req_io_pool);
}

/*
 * The callback that will be called by the worker queue to perform encryption
 * for writes and submit the request using the elevator.
 */
static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
{
        struct request *clone = NULL;
        struct bio *bio_src = NULL;
        unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
                total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
        struct req_iterator iter = {0, NULL};
        struct req_iterator iter1 = {0, NULL};
        struct ablkcipher_request *req = NULL;
        struct req_crypt_result result;
        struct bio_vec *bvec = NULL;
        struct scatterlist *req_sg_in = NULL;
        struct scatterlist *req_sg_out = NULL;
        int copy_bio_sector_to_req = 0;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        struct page *page = NULL;
        u8 IV[AES_XTS_IV_LEN];
        int remaining_size = 0;
        int err = 0;

        if (io) {
                if (io->cloned_request) {
                        clone = io->cloned_request;
                } else {
                        DMERR("%s io->cloned_request is NULL\n",
                                                        __func__);
                        error = DM_REQ_CRYPT_ERROR;
                        goto submit_request;
                }
        } else {
                DMERR("%s io is NULL\n",
                                        __func__);
                error = DM_REQ_CRYPT_ERROR;
                goto submit_request;
        }

        req_crypt_inc_pending(io);

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                DMERR("%s ablkcipher request allocation failed\n",
                                __func__);
                error = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }

        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                req_crypt_cipher_complete, &result);

        init_completion(&result.completion);
        err = qcrypto_cipher_set_device(req, io->key_id);
        if (err != 0) {
                DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
                                __func__, err);
                error = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }
        qcrypto_cipher_set_flag(req,
                QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
        crypto_ablkcipher_clear_flags(tfm, ~0);
        crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);

        req_sg_in = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
                        GFP_KERNEL);
        if (!req_sg_in) {
                DMERR("%s req_sg_in allocation failed\n",
                                __func__);
                error = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }

        req_sg_out = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
                        GFP_KERNEL);
        if (!req_sg_out) {
                DMERR("%s req_sg_out allocation failed\n",
                                __func__);
                error = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }

        total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in);
        if ((total_sg_len_req_in <= 0) ||
                        (total_sg_len_req_in > MAX_SG_LIST)) {
                DMERR("%s Request Error %d", __func__, total_sg_len_req_in);
                error = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }

        total_bytes_in_req = clone->__data_len;
        if (total_bytes_in_req > REQ_DM_512_KB) {
                DMERR("%s total_bytes_in_req > 512 KB %d",
                                __func__, total_bytes_in_req);
                error = DM_REQ_CRYPT_ERROR;
                goto ablkcipher_req_alloc_failure;
        }

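        /*
         * Swap the clone's bvec pages for bounce pages from req_page_pool so
         * that the cipher output (req_sg_out, mapped below) lands in fresh
         * pages instead of overwriting the caller's still-unencrypted pages
         * (already captured in req_sg_in). Consecutive bvecs are packed into
         * a page while space remains; allocation retries after a short
         * congestion wait.
         */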
        rq_for_each_segment(bvec, clone, iter) {
                if (bvec->bv_len > remaining_size) {
                        page = NULL;
                        while (page == NULL) {
                                page = mempool_alloc(req_page_pool, gfp_mask);
                                if (!page) {
                                        DMERR("%s Crypt page alloc failed",
                                                        __func__);
                                        congestion_wait(BLK_RW_ASYNC, HZ/100);
                                }
                        }

                        bvec->bv_page = page;
                        bvec->bv_offset = 0;
                        remaining_size = PAGE_SIZE - bvec->bv_len;
                        if (remaining_size < 0)
                                BUG();
                } else {
                        bvec->bv_page = page;
                        bvec->bv_offset = PAGE_SIZE - remaining_size;
                        remaining_size = remaining_size - bvec->bv_len;
                }
        }

        total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out);
        if ((total_sg_len_req_out <= 0) ||
                        (total_sg_len_req_out > MAX_SG_LIST)) {
                DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
                error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
                goto ablkcipher_req_alloc_failure;
        }

        memset(IV, 0, AES_XTS_IV_LEN);
        memcpy(IV, &clone->__sector, sizeof(sector_t));

        ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out,
                        total_bytes_in_req, (void *) IV);

        rc = crypto_ablkcipher_encrypt(req);

        switch (rc) {
        case 0:
                break;

        case -EBUSY:
                /*
                 * Let's make this a synchronous request by waiting on
                 * in progress as well
                 */
        case -EINPROGRESS:
                wait_for_completion_interruptible(&result.completion);
                if (result.err) {
                        DMERR("%s error = %d encrypting the request\n",
                                 __func__, result.err);
                        error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
                        goto ablkcipher_req_alloc_failure;
                }
                break;

        default:
                error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
                goto ablkcipher_req_alloc_failure;
        }

        __rq_for_each_bio(bio_src, clone) {
                if (copy_bio_sector_to_req == 0) {
                        clone->buffer = bio_data(bio_src);
                        copy_bio_sector_to_req++;
                }
                blk_queue_bounce(clone->q, &bio_src);
        }


ablkcipher_req_alloc_failure:
        if (req)
                ablkcipher_request_free(req);

        if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) {
                bvec = NULL;
                rq_for_each_segment(bvec, clone, iter1) {
                        if (bvec->bv_offset == 0) {
                                mempool_free(bvec->bv_page, req_page_pool);
                                bvec->bv_page = NULL;
                        } else
                                bvec->bv_page = NULL;
                }
        }


        kfree(req_sg_in);

        kfree(req_sg_out);

submit_request:
        if (io)
                io->error = error;
        req_crypt_dec_pending_encrypt(io);
}

/*
 * This callback is called by the worker queue to perform non-encrypted writes
 * and submit the request using the elevator.
 */
static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io)
{
        struct request *clone = NULL;

        if (!io || !io->cloned_request) {
                DMERR("%s io is invalid\n", __func__);
                BUG(); /* should not happen */
        }

        clone = io->cloned_request;
        io->error = 0;
        dm_dispatch_request(clone);
}

/* Workqueue callback: route the io to the convert (crypt) or plain path */
static void req_cryptd_crypt(struct work_struct *work)
{
        struct req_dm_crypt_io *io =
                        container_of(work, struct req_dm_crypt_io, work);

        if (rq_data_dir(io->cloned_request) == WRITE) {
                if (io->should_encrypt)
                        req_cryptd_crypt_write_convert(io);
                else
                        req_cryptd_crypt_write_plain(io);
        } else if (rq_data_dir(io->cloned_request) == READ) {
                if (io->should_decrypt)
                        req_cryptd_crypt_read_convert(io);
                else
                        req_cryptd_crypt_read_plain(io);
        } else {
                DMERR("%s received neither a read nor a write request for clone %p\n",
                        __func__, io->cloned_request);
        }
}

static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
{
        INIT_WORK(&io->work, req_cryptd_crypt);
        queue_work(req_crypt_queue, &io->work);
}

/*
 * Cipher complete callback, this is triggered by the Linux crypto api once
 * the operation is done. This signals the waiting thread that the crypto
 * operation is complete.
 */
static void req_crypt_cipher_complete(struct crypto_async_request *req, int err)
{
        struct req_crypt_result *res = req->data;

        if (err == -EINPROGRESS)
                return;

        res->err = err;
        complete(&res->completion);
}

/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void req_crypt_blk_partition_remap(struct bio *bio)
{
        struct block_device *bdev = bio->bi_bdev;

        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
                /*
                 * Check for integer overflow, should never happen.
                 */
                if (p->start_sect > (UINT_MAX - bio->bi_sector))
                        BUG();

                bio->bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
        }
}

/*
 * The endio function is called from ksoftirqd context (atomic).
 * For write operations, the new pages created from the mempool are freed
 * and returned. For read operations, decryption is required; since this is
 * called in an atomic context, the request is sent to a worker queue to
 * complete the decryption and free the request once done.
 */
static int req_crypt_endio(struct dm_target *ti, struct request *clone,
                            int error, union map_info *map_context)
{
        int err = 0;
        struct req_iterator iter1;
        struct bio_vec *bvec = NULL;
        struct req_dm_crypt_io *req_io = map_context->ptr;

        /* For a write request, free the bounce pages used for encryption and return. */
        bvec = NULL;
        if (rq_data_dir(clone) == WRITE) {
                rq_for_each_segment(bvec, clone, iter1) {
                        if (req_io->should_encrypt && bvec->bv_offset == 0) {
                                mempool_free(bvec->bv_page, req_page_pool);
                                bvec->bv_page = NULL;
                        } else
                                bvec->bv_page = NULL;
                }
                mempool_free(req_io, req_io_pool);
                goto submit_request;
        } else if (rq_data_dir(clone) == READ) {
                req_io->error = error;
                req_cryptd_queue_crypt(req_io);
                err = DM_ENDIO_INCOMPLETE;
                goto submit_request;
        }

submit_request:
        return err;
}

/*
 * This function is called with interrupts disabled.
 * It remaps the clone onto the underlying device. For a write request it
 * calls into the worker queue to encrypt the data and submits the request
 * directly using the elevator; for a read request no pre-processing is
 * required and the request is returned to dm once the mapping is done.
 */
static int req_crypt_map(struct dm_target *ti, struct request *clone,
                         union map_info *map_context)
{
        struct req_dm_crypt_io *req_io = NULL;
        int error = DM_REQ_CRYPT_ERROR, copy_bio_sector_to_req = 0;
        struct bio *bio_src = NULL;

        if ((rq_data_dir(clone) != READ) &&
                        (rq_data_dir(clone) != WRITE)) {
                error = DM_REQ_CRYPT_ERROR;
                DMERR("%s Unknown request\n", __func__);
                goto submit_request;
        }

        req_io = mempool_alloc(req_io_pool, GFP_NOWAIT);
        if (!req_io) {
                DMERR("%s req_io allocation failed\n", __func__);
                error = DM_REQ_CRYPT_ERROR;
                goto submit_request;
        }

        /* Save the clone in the req_io, the callback to the worker
         * queue will get the req_io
         */
        req_io->cloned_request = clone;
        map_context->ptr = req_io;
        atomic_set(&req_io->pending, 0);

        if (rq_data_dir(clone) == WRITE)
                req_io->should_encrypt = req_crypt_should_encrypt(req_io);
        if (rq_data_dir(clone) == READ)
                req_io->should_decrypt = req_crypt_should_deccrypt(req_io);

        /* Get the queue of the underlying original device */
        clone->q = bdev_get_queue(dev->bdev);
        clone->rq_disk = dev->bdev->bd_disk;

        __rq_for_each_bio(bio_src, clone) {
                bio_src->bi_bdev = dev->bdev;
                /* Currently the way req-dm works is that once the underlying
                 * device driver completes the request by calling into the
                 * block layer, the block layer completes the bios (clones)
                 * and then the cloned request. This is undesirable for
                 * req-dm-crypt, hence the BIO_DONTFREE flag was added; it
                 * ensures that the block layer does not complete the cloned
                 * bios before completing the request. When the crypt endio
                 * is called, post-processing is done and then the dm layer
                 * will complete the bios (clones) and free them.
                 */
                bio_src->bi_flags |= 1 << BIO_DONTFREE;

                /*
                 * If this device has partitions, remap block n
                 * of partition p to block n+start(p) of the disk.
                 */
                req_crypt_blk_partition_remap(bio_src);
                if (copy_bio_sector_to_req == 0) {
                        clone->__sector = bio_src->bi_sector;
                        clone->buffer = bio_data(bio_src);
                        copy_bio_sector_to_req++;
                }
                blk_queue_bounce(clone->q, &bio_src);
        }

        if (rq_data_dir(clone) == READ) {
                error = DM_MAPIO_REMAPPED;
                goto submit_request;
        } else if (rq_data_dir(clone) == WRITE) {
                req_cryptd_queue_crypt(req_io);
                error = DM_MAPIO_SUBMITTED;
                goto submit_request;
        }

submit_request:
        return error;

}

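/* Target destructor: tear down the workqueue, the mempools and the cipher handle */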
static void req_crypt_dtr(struct dm_target *ti)
{
        DMDEBUG("dm-req-crypt Destructor.\n");

        if (req_crypt_queue) {
                destroy_workqueue(req_crypt_queue);
                req_crypt_queue = NULL;
        }
        if (req_io_pool) {
                mempool_destroy(req_io_pool);
                req_io_pool = NULL;
        }
        if (req_page_pool) {
                mempool_destroy(req_page_pool);
                req_page_pool = NULL;
        }
        if (tfm) {
                crypto_free_ablkcipher(tfm);
                tfm = NULL;
        }
}


/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start> [fde_enabled]
 * The optional sixth argument enables FDE when it is the literal string
 * "fde_enabled"; any other value disables it, and omitting it keeps FDE
 * enabled for backward compatibility.
 */
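/*
 * Illustrative table line only (the device path, sector counts and volume
 * name below are hypothetical, not taken from this driver). The <cipher>,
 * <key> and <iv_offset> fields are accepted for dm-crypt style compatibility
 * but are not parsed by this constructor; the actual key is the hardware
 * pipe key selected through qcrypto:
 *
 *   echo "0 4194304 req-crypt aes-xts-plain 0 0 /dev/block/mmcblk0p25 0 fde_enabled" | \
 *           dmsetup create encrypted-store
 */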
static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        unsigned long long tmpll;
        char dummy;
        int err = DM_REQ_CRYPT_ERROR;

        DMDEBUG("dm-req-crypt Constructor.\n");

        if (argc < 5) {
                DMERR("%s Not enough args\n", __func__);
                err = DM_REQ_CRYPT_ERROR;
                goto ctr_exit;
        }

        if (argv[3]) {
                if (dm_get_device(ti, argv[3],
                                dm_table_get_mode(ti->table), &dev)) {
                        DMERR("%s Device Lookup failed\n", __func__);
                        err = DM_REQ_CRYPT_ERROR;
                        goto ctr_exit;
                }
        } else {
                DMERR("%s Arg[3] invalid\n", __func__);
                err = DM_REQ_CRYPT_ERROR;
                goto ctr_exit;
        }

        if (argv[4]) {
                if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
                        DMERR("%s Invalid device sector\n", __func__);
                        err = DM_REQ_CRYPT_ERROR;
                        goto ctr_exit;
                }
        } else {
                DMERR("%s Arg[4] invalid\n", __func__);
                err = DM_REQ_CRYPT_ERROR;
                goto ctr_exit;
        }

        start_sector_orig = tmpll;

        /* Allow backward compatibility: the FDE flag is optional */
        if (argc >= 6) {
                if (argv[5]) {
                        if (!strcmp(argv[5], "fde_enabled"))
                                is_fde_enabled = true;
                        else
                                is_fde_enabled = false;
                } else {
                        DMERR("%s Arg[5] invalid\n", __func__);
                        err = DM_REQ_CRYPT_ERROR;
                        goto ctr_exit;
                }
        } else {
                DMERR("%s Arg[5] missing, set FDE enabled.\n", __func__);
                is_fde_enabled = true; /* backward compatible */
        }
        DMDEBUG("%s is_fde_enabled=%d\n", __func__, is_fde_enabled);

        req_crypt_queue = alloc_workqueue("req_cryptd",
                                        WQ_NON_REENTRANT |
                                        WQ_HIGHPRI |
                                        WQ_CPU_INTENSIVE |
                                        WQ_MEM_RECLAIM,
                                        1);
        if (!req_crypt_queue) {
                DMERR("%s req_crypt_queue not allocated\n", __func__);
                err = DM_REQ_CRYPT_ERROR;
                goto ctr_exit;
        }

        /* Allocate the block cipher handle and keep it */
        tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0);
        if (IS_ERR(tfm)) {
                DMERR("%s ablkcipher tfm allocation failed: %ld\n",
                                __func__, PTR_ERR(tfm));
                err = DM_REQ_CRYPT_ERROR;
                /* don't pass an ERR_PTR to crypto_free_ablkcipher in ctr_exit */
                tfm = NULL;
                goto ctr_exit;
        }

        req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
        BUG_ON(!req_io_pool);
        if (!req_io_pool) {
                DMERR("%s req_io_pool not allocated\n", __func__);
                err = DM_REQ_CRYPT_ERROR;
                goto ctr_exit;
        }

        req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
        if (!req_page_pool) {
                DMERR("%s req_page_pool not allocated\n", __func__);
                err = DM_REQ_CRYPT_ERROR;
                goto ctr_exit;
        }
        err = 0;
ctr_exit:
        if (err != 0) {
                if (req_crypt_queue) {
                        destroy_workqueue(req_crypt_queue);
                        req_crypt_queue = NULL;
                }
                if (req_io_pool) {
                        mempool_destroy(req_io_pool);
                        req_io_pool = NULL;
                }
                if (req_page_pool) {
                        mempool_destroy(req_page_pool);
                        req_page_pool = NULL;
                }
                if (tfm) {
                        crypto_free_ablkcipher(tfm);
                        tfm = NULL;
                }
        }
        return err;
}

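/* Report the single underlying device and its mapped range to the dm core */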
static int req_crypt_iterate_devices(struct dm_target *ti,
                                     iterate_devices_callout_fn fn, void *data)
{
        return fn(ti, dev, start_sector_orig, ti->len, data);
}

static struct target_type req_crypt_target = {
        .name = "req-crypt",
        .version = {1, 0, 0},
        .module = THIS_MODULE,
        .ctr = req_crypt_ctr,
        .dtr = req_crypt_dtr,
        .map_rq = req_crypt_map,
        .rq_end_io = req_crypt_endio,
        .iterate_devices = req_crypt_iterate_devices,
};

static int __init req_dm_crypt_init(void)
{
        int r;

        _req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0);
        if (!_req_crypt_io_pool)
                return -ENOMEM;

        r = dm_register_target(&req_crypt_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                kmem_cache_destroy(_req_crypt_io_pool);
        }

        DMINFO("dm-req-crypt successfully initialized.\n");

        return r;
}

static void __exit req_dm_crypt_exit(void)
{
        kmem_cache_destroy(_req_crypt_io_pool);
        dm_unregister_target(&req_crypt_target);
}

module_init(req_dm_crypt_init);
module_exit(req_dm_crypt_exit);

MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption");
MODULE_LICENSE("GPL v2");