/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <mach/qcrypto.h>

#include <linux/device-mapper.h>


#define DM_MSG_PREFIX "req-crypt"

#define MAX_SG_LIST 1024
#define REQ_DM_512_KB (512*1024)
#define MAX_ENCRYPTION_BUFFERS 1
#define MIN_IOS 16
#define MIN_POOL_PAGES 32
#define KEY_SIZE_XTS 32
#define AES_XTS_IV_LEN 16

#define DM_REQ_CRYPT_ERROR -1
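
/*
 * Note on the sizes above: KEY_SIZE_XTS (32 bytes, i.e. two 128-bit key
 * halves) and AES_XTS_IV_LEN (16 bytes) appear to match an AES-128-XTS
 * configuration, and the 512-byte data unit requested through
 * QCRYPTO_CTX_XTS_DU_SIZE_512B below means each 512-byte sector is
 * ciphered independently under its own tweak.
 */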

struct req_crypt_result {
	struct completion completion;
	int err;
};

static struct dm_dev *dev;
static struct kmem_cache *_req_crypt_io_pool;
static sector_t start_sector_orig;
static struct workqueue_struct *req_crypt_queue;
static mempool_t *req_io_pool;
static mempool_t *req_page_pool;
static struct crypto_ablkcipher *tfm;

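/*
 * Per-request state. One of these is allocated from req_io_pool for
 * every cloned request in req_crypt_map() and carried through the
 * worker queue via map_context->ptr until the request completes.
 */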
struct req_dm_crypt_io {
	struct work_struct work;
	struct request *cloned_request;
	int error;
	atomic_t pending;
	struct timespec start_time;
};

static void req_crypt_cipher_complete(struct crypto_async_request *req,
					int err);

static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io)
{
	int error = 0;
	struct request *clone = NULL;

	if (io) {
		error = io->error;
		if (io->cloned_request) {
			clone = io->cloned_request;
		} else {
			DMERR("%s io->cloned_request is NULL\n",
								__func__);
			/*
			 * If the clone is NULL we cannot do anything;
			 * this should never happen.
			 */
			BUG();
		}
	} else {
		DMERR("%s io is NULL\n", __func__);
		/*
		 * If io is NULL we cannot do anything;
		 * this should never happen.
		 */
		BUG();
	}

	atomic_dec(&io->pending);

	if (error < 0)
		dm_kill_unmapped_request(clone, error);
	else
		dm_dispatch_request(clone);
}

static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io)
{
	int error = 0;
	struct request *clone = NULL;

	if (io) {
		error = io->error;
		if (io->cloned_request) {
			clone = io->cloned_request;
		} else {
			DMERR("%s io->cloned_request is NULL\n",
								__func__);
			/*
			 * If the clone is NULL we cannot do anything;
			 * this should never happen.
			 */
			BUG();
		}
	} else {
		DMERR("%s io is NULL\n", __func__);
		/*
		 * If io is NULL we cannot do anything;
		 * this should never happen.
		 */
		BUG();
	}

	/* Neither io nor the clone can be NULL at this point */
	dm_end_request(clone, error);
	atomic_dec(&io->pending);
	mempool_free(io, req_io_pool);
}

/*
 * Worker-queue callback that performs decryption for read requests and
 * uses the dm functions to complete the bios and the request.
 */
static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
{
	struct request *clone = NULL;
	int error = 0;
	int total_sg_len = 0, rc = 0, total_bytes_in_req = 0;
	struct ablkcipher_request *req = NULL;
	struct req_crypt_result result;
	struct scatterlist *req_sg_read = NULL;
	int err = 0;
	struct req_iterator iter2;
	struct bio_vec *bvec = NULL;
	u8 IV[AES_XTS_IV_LEN];

	if (io) {
		error = io->error;
		if (io->cloned_request) {
			clone = io->cloned_request;
		} else {
			DMERR("%s io->cloned_request is NULL\n",
								__func__);
			error = DM_REQ_CRYPT_ERROR;
			goto submit_request;
		}
	} else {
		DMERR("%s io is NULL\n", __func__);
		error = DM_REQ_CRYPT_ERROR;
		goto submit_request;
	}

	req_crypt_inc_pending(io);

	if (error != 0) {
		err = error;
		goto submit_request;
	}

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		DMERR("%s ablkcipher request allocation failed\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					req_crypt_cipher_complete, &result);
	init_completion(&result.completion);
	qcrypto_cipher_set_flag(req,
		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
	crypto_ablkcipher_clear_flags(tfm, ~0);
	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);

	req_sg_read = kzalloc(sizeof(struct scatterlist) *
					MAX_SG_LIST, GFP_KERNEL);
	if (!req_sg_read) {
		DMERR("%s req_sg_read allocation failed\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	total_sg_len = blk_rq_map_sg(clone->q, clone, req_sg_read);
	if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
		DMERR("%s Request error %d", __func__, total_sg_len);
		err = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}
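
	/*
	 * blk_rq_map_sg() collapsed the clone's bio segments into
	 * req_sg_read; the same list is used below as both source and
	 * destination, so the data is decrypted in place inside the
	 * pages the block layer already mapped for this request.
	 */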

	/* Compute the total number of bytes to decrypt */
	rq_for_each_segment(bvec, clone, iter2) {
		total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
	}

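	/*
	 * The IV is the request's starting sector, zero-padded to
	 * AES_XTS_IV_LEN bytes. With QCRYPTO_CTX_XTS_DU_SIZE_512B set
	 * above, the expectation (an assumption about the qcrypto
	 * driver's data-unit handling) is that the hardware advances
	 * the XTS tweak per 512-byte data unit from this value.
	 */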
	memset(IV, 0, AES_XTS_IV_LEN);
	memcpy(IV, &clone->__sector, sizeof(sector_t));

	ablkcipher_request_set_crypt(req, req_sg_read, req_sg_read,
			total_bytes_in_req, (void *) IV);

	rc = crypto_ablkcipher_decrypt(req);

	switch (rc) {
	case 0:
		break;

	case -EBUSY:
		/*
		 * Fall through: treat -EBUSY like -EINPROGRESS and make
		 * the request synchronous by waiting for completion.
		 */
	case -EINPROGRESS:
		wait_for_completion_io(&result.completion);
		if (result.err) {
			DMERR("%s error = %d decrypting the request\n",
				 __func__, result.err);
			err = DM_REQ_CRYPT_ERROR;
		}
		break;

	default:
		err = DM_REQ_CRYPT_ERROR;
		break;
	}

ablkcipher_req_alloc_failure:

	if (req)
		ablkcipher_request_free(req);

	kfree(req_sg_read);

submit_request:
	if (io)
		io->error = err;
	req_crypt_dec_pending_decrypt(io);
}

/*
 * Worker-queue callback that performs encryption for write requests and
 * then submits the request via the elevator.
 */
static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
{
	struct request *clone = NULL;
	struct bio *bio_src = NULL;
	unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
		total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
	struct req_iterator iter;
	struct ablkcipher_request *req = NULL;
	struct req_crypt_result result;
	struct bio_vec *bvec = NULL;
	struct scatterlist *req_sg_in = NULL;
	struct scatterlist *req_sg_out = NULL;
	int copy_bio_sector_to_req = 0;
	/* Bounce pages may live in highmem; never recurse into block I/O */
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	struct page *page = NULL;
	u8 IV[AES_XTS_IV_LEN];
	int remaining_size = 0;

	if (io) {
		if (io->cloned_request) {
			clone = io->cloned_request;
		} else {
			DMERR("%s io->cloned_request is NULL\n",
								__func__);
			error = DM_REQ_CRYPT_ERROR;
			goto submit_request;
		}
	} else {
		DMERR("%s io is NULL\n", __func__);
		error = DM_REQ_CRYPT_ERROR;
		goto submit_request;
	}

	req_crypt_inc_pending(io);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		DMERR("%s ablkcipher request allocation failed\n",
					__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					req_crypt_cipher_complete, &result);

	init_completion(&result.completion);
	qcrypto_cipher_set_flag(req,
		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
	crypto_ablkcipher_clear_flags(tfm, ~0);
	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);

	req_sg_in = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
			GFP_KERNEL);
	if (!req_sg_in) {
		DMERR("%s req_sg_in allocation failed\n", __func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	req_sg_out = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
			GFP_KERNEL);
	if (!req_sg_out) {
		DMERR("%s req_sg_out allocation failed\n", __func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in);
	if ((total_sg_len_req_in <= 0) ||
			(total_sg_len_req_in > MAX_SG_LIST)) {
		DMERR("%s Request error %d", __func__, total_sg_len_req_in);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

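	/*
	 * Swap every segment of the clone over to freshly allocated
	 * pool pages, packed back to back. req_sg_in, mapped above,
	 * still points at the original pages, so the encryption below
	 * reads plaintext from the originals and writes ciphertext into
	 * these bounce pages; the originals are never modified. The
	 * pages are handed back to req_page_pool in req_crypt_endio().
	 */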
	rq_for_each_segment(bvec, clone, iter) {
try_again:
		if (bvec->bv_len > remaining_size) {
			page = mempool_alloc(req_page_pool, gfp_mask);
			if (!page) {
				DMERR("%s Crypt page alloc failed", __func__);
				congestion_wait(BLK_RW_ASYNC, HZ/100);
				goto try_again;
			}
			bvec->bv_page = page;
			bvec->bv_offset = 0;
			total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
			remaining_size = PAGE_SIZE - bvec->bv_len;
			if (remaining_size < 0)
				BUG();
		} else {
			bvec->bv_page = page;
			bvec->bv_offset = PAGE_SIZE - remaining_size;
			remaining_size = remaining_size - bvec->bv_len;
			total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
		}
	}

	total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out);
	if ((total_sg_len_req_out <= 0) ||
			(total_sg_len_req_out > MAX_SG_LIST)) {
		DMERR("%s Request error %d", __func__, total_sg_len_req_out);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	memset(IV, 0, AES_XTS_IV_LEN);
	memcpy(IV, &clone->__sector, sizeof(sector_t));

	ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out,
			total_bytes_in_req, (void *) IV);

	rc = crypto_ablkcipher_encrypt(req);

	switch (rc) {
	case 0:
		break;

	case -EBUSY:
		/*
		 * Fall through: treat -EBUSY like -EINPROGRESS and make
		 * the request synchronous by waiting for completion.
		 */
	case -EINPROGRESS:
		/*
		 * Wait non-interruptibly: an interruptible wait could
		 * return on a signal and read result.err before the
		 * cipher has actually completed.
		 */
		wait_for_completion_io(&result.completion);
		if (result.err) {
			DMERR("%s error = %d encrypting the request\n",
				 __func__, result.err);
			error = DM_REQ_CRYPT_ERROR;
			goto ablkcipher_req_alloc_failure;
		}
		break;

	default:
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

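	/*
	 * The clone's bio pages now hold ciphertext, so refresh the
	 * legacy ->buffer pointer from the first bio and let the block
	 * layer bounce any highmem pages before the request is
	 * dispatched.
	 */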
	__rq_for_each_bio(bio_src, clone) {
		if (copy_bio_sector_to_req == 0) {
			clone->buffer = bio_data(bio_src);
			copy_bio_sector_to_req++;
		}
		blk_queue_bounce(clone->q, &bio_src);
	}

ablkcipher_req_alloc_failure:
	if (req)
		ablkcipher_request_free(req);

	kfree(req_sg_in);
	kfree(req_sg_out);

submit_request:
	if (io)
		io->error = error;
	req_crypt_dec_pending_encrypt(io);
}

/* Work callback: dispatch the I/O to the proper convert routine */
static void req_cryptd_crypt(struct work_struct *work)
{
	struct req_dm_crypt_io *io =
			container_of(work, struct req_dm_crypt_io, work);

	if (rq_data_dir(io->cloned_request) == WRITE)
		req_cryptd_crypt_write_convert(io);
	else if (rq_data_dir(io->cloned_request) == READ)
		req_cryptd_crypt_read_convert(io);
	else
		DMERR("%s received a request that is neither read nor write, clone %p\n",
			__func__, io->cloned_request);
}

static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
{
	INIT_WORK(&io->work, req_cryptd_crypt);
	queue_work(req_crypt_queue, &io->work);
}

/*
 * Cipher completion callback, triggered by the Linux crypto API once
 * the operation is done. It signals the waiting thread that the crypto
 * operation is complete.
 */
static void req_crypt_cipher_complete(struct crypto_async_request *req, int err)
{
	struct req_crypt_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

/*
 * If bio->bi_bdev is a partition, remap the location.
 */
static inline void req_crypt_blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;
	}
}

/*
 * The endio function is called from ksoftirqd context (atomic).
 * For write operations, the bounce pages taken from the mempool are
 * freed and returned. For read operations, decryption is still
 * required; since this runs in atomic context, the request is sent
 * to a worker queue to complete the decryption and free the request
 * once done.
 */
static int req_crypt_endio(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	int err = 0;
	struct req_iterator iter1;
	struct bio_vec *bvec = NULL;
	struct req_dm_crypt_io *req_io = map_context->ptr;

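	/*
	 * req_io was stashed in map_context->ptr by req_crypt_map(),
	 * so the per-request state is available here at completion.
	 */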
	/*
	 * For writes, free the bounce pages; only the segment that
	 * starts a page (bv_offset == 0) owns it.
	 */
	if (rq_data_dir(clone) == WRITE) {
		rq_for_each_segment(bvec, clone, iter1) {
			if (bvec->bv_offset == 0)
				mempool_free(bvec->bv_page, req_page_pool);
			bvec->bv_page = NULL;
		}
		mempool_free(req_io, req_io_pool);
		goto submit_request;
	} else if (rq_data_dir(clone) == READ) {
		req_io->error = error;
		req_cryptd_queue_crypt(req_io);
		err = DM_ENDIO_INCOMPLETE;
		goto submit_request;
	}

submit_request:
	return err;
}

/*
 * This function is called with interrupts disabled.
 * It remaps the clone to the underlying device. For a write request,
 * it hands the clone to the worker queue, which encrypts the data and
 * then submits the request directly via the elevator. For a read
 * request no pre-processing is required, so the clone is returned to
 * dm once the mapping is done.
 */
static int req_crypt_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	struct req_dm_crypt_io *req_io = NULL;
	int error = DM_MAPIO_REMAPPED, copy_bio_sector_to_req = 0;
	struct bio *bio_src = NULL;

	req_io = mempool_alloc(req_io_pool, GFP_NOWAIT);
	if (!req_io) {
		DMERR("%s req_io allocation failed\n", __func__);
		error = DM_REQ_CRYPT_ERROR;
		goto submit_request;
	}

	/*
	 * Save the clone in the req_io; the callback to the worker
	 * queue will get the req_io.
	 */
	req_io->cloned_request = clone;
	map_context->ptr = req_io;
	atomic_set(&req_io->pending, 0);

	/* Get the queue of the underlying original device */
	clone->q = bdev_get_queue(dev->bdev);
	clone->rq_disk = dev->bdev->bd_disk;

	__rq_for_each_bio(bio_src, clone) {
		bio_src->bi_bdev = dev->bdev;
		/*
		 * Currently the way req-dm works is that the underlying
		 * device driver completes the request by calling into
		 * the block layer, and the block layer completes the
		 * bios (clones) and then the cloned request. This is
		 * undesirable for req-dm-crypt, hence the BIO_DONTFREE
		 * flag, which ensures that the block layer does not
		 * complete the cloned bios before completing the
		 * request. When the crypt endio is called,
		 * post-processing is done and then the dm layer
		 * completes the bios (clones) and frees them.
		 */
		bio_src->bi_flags |= 1 << BIO_DONTFREE;

		/*
		 * If this device has partitions, remap block n
		 * of partition p to block n+start(p) of the disk.
		 */
		req_crypt_blk_partition_remap(bio_src);
		if (copy_bio_sector_to_req == 0) {
			clone->__sector = bio_src->bi_sector;
			clone->buffer = bio_data(bio_src);
			copy_bio_sector_to_req++;
		}
		blk_queue_bounce(clone->q, &bio_src);
	}

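	/*
	 * Reads can be dispatched as-is and are decrypted in the endio
	 * path (DM_MAPIO_REMAPPED). Writes must be encrypted first, so
	 * they are handed to the worker queue and DM_MAPIO_SUBMITTED is
	 * returned so that dm does not dispatch the clone itself.
	 */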
	if (rq_data_dir(clone) == READ) {
		error = DM_MAPIO_REMAPPED;
		goto submit_request;
	} else if (rq_data_dir(clone) == WRITE) {
		req_cryptd_queue_crypt(req_io);
		error = DM_MAPIO_SUBMITTED;
		goto submit_request;
	} else {
		error = DM_REQ_CRYPT_ERROR;
		DMERR("%s Unknown request\n", __func__);
	}

submit_request:
	return error;
}

static int req_crypt_status(struct dm_target *ti, status_type_t type,
			    char *result, unsigned maxlen)
{
	return 0;
}

static void req_crypt_dtr(struct dm_target *ti)
{
	if (req_crypt_queue)
		destroy_workqueue(req_crypt_queue);
	if (req_io_pool)
		mempool_destroy(req_io_pool);
	if (req_page_pool)
		mempool_destroy(req_page_pool);
	if (tfm)
		crypto_free_ablkcipher(tfm);
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
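/*
 * Illustrative dmsetup table line (hypothetical device and length;
 * only <dev_path> and <start> are parsed below, and the actual key is
 * taken from the hardware pipe rather than from this line):
 *
 *   0 1048576 req-crypt aes-xts ignored 0 /dev/block/sda 0
 */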
static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	unsigned long long tmpll;
	char dummy;

	if (argc < 5) {
		DMERR("%s Not enough arguments\n", __func__);
		return DM_REQ_CRYPT_ERROR;
	}

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &dev)) {
		DMERR("%s Device lookup failed\n", __func__);
		return DM_REQ_CRYPT_ERROR;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		DMERR("%s Invalid device sector\n", __func__);
		return DM_REQ_CRYPT_ERROR;
	}
	start_sector_orig = tmpll;

	req_crypt_queue = alloc_workqueue("req_cryptd",
					  WQ_HIGHPRI |
					  WQ_CPU_INTENSIVE |
					  WQ_MEM_RECLAIM,
					  1);
	if (!req_crypt_queue) {
		DMERR("%s req_crypt_queue not allocated\n", __func__);
		return DM_REQ_CRYPT_ERROR;
	}
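
	/*
	 * max_active of 1 limits the queue above to a single in-flight
	 * work item per CPU, and WQ_MEM_RECLAIM provides a rescuer
	 * thread so that crypt work issued as part of writeback can
	 * make forward progress under memory pressure.
	 */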

	/* Allocate the crypto ablkcipher and keep the handle */
	tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		DMERR("%s ablkcipher tfm allocation failed : error = %lu\n",
			 __func__, PTR_ERR(tfm));
		return DM_REQ_CRYPT_ERROR;
	}
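
	/*
	 * "qcom-xts(aes)" is assumed to select the Qualcomm hardware
	 * crypto engine's XTS-AES implementation. Since the convert
	 * paths set QCRYPTO_CTX_USE_PIPE_KEY, the real key comes from
	 * the hardware key pipe; the crypto_ablkcipher_setkey(tfm,
	 * NULL, KEY_SIZE_XTS) calls there only convey the key length.
	 */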

	req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
	if (!req_io_pool) {
		DMERR("%s req_io_pool not allocated\n", __func__);
		return DM_REQ_CRYPT_ERROR;
	}

	req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!req_page_pool) {
		DMERR("%s req_page_pool not allocated\n", __func__);
		return DM_REQ_CRYPT_ERROR;
	}

	return 0;
}

static void req_crypt_postsuspend(struct dm_target *ti)
{
}

static int req_crypt_preresume(struct dm_target *ti)
{
	return 0;
}

static void req_crypt_resume(struct dm_target *ti)
{
}

/*
 * Message interface
 *	key set <key>
 *	key wipe
 */
static int req_crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	return 0;
}

static int req_crypt_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	return fn(ti, dev, start_sector_orig, ti->len, data);
}

static struct target_type req_crypt_target = {
	.name = "req-crypt",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = req_crypt_ctr,
	.dtr = req_crypt_dtr,
	.map_rq = req_crypt_map,
	.rq_end_io = req_crypt_endio,
	.status = req_crypt_status,
	.postsuspend = req_crypt_postsuspend,
	.preresume = req_crypt_preresume,
	.resume = req_crypt_resume,
	.message = req_crypt_message,
	.iterate_devices = req_crypt_iterate_devices,
};

static int __init req_dm_crypt_init(void)
{
	int r;

	_req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0);
	if (!_req_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&req_crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit req_dm_crypt_exit(void)
{
	kmem_cache_destroy(_req_crypt_io_pool);
	dm_unregister_target(&req_crypt_target);
}

module_init(req_dm_crypt_init);
module_exit(req_dm_crypt_exit);

MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption");
MODULE_LICENSE("GPL v2");