1/*
2 * DM request based crypto driver
3 *
4 * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/completion.h>
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/bio.h>
22#include <linux/blkdev.h>
23#include <linux/mempool.h>
24#include <linux/slab.h>
25#include <linux/crypto.h>
26#include <linux/qcrypto.h>
27#include <linux/workqueue.h>
28#include <linux/backing-dev.h>
29#include <linux/atomic.h>
30#include <linux/scatterlist.h>
31#include <linux/device-mapper.h>
32#include <linux/printk.h>
33
34#include <asm/page.h>
35#include <asm/unaligned.h>
36#include <crypto/skcipher.h>
37#include <crypto/internal/skcipher.h>
38#include <crypto/scatterwalk.h>
39#include <crypto/hash.h>
40#include <crypto/md5.h>
41#include <crypto/algapi.h>
42#include <crypto/ice.h>
43
44#define DM_MSG_PREFIX "req-crypt"
45
46#define MAX_SG_LIST 1024
47#define REQ_DM_512_KB (512*1024)
48#define MAX_ENCRYPTION_BUFFERS 1
49#define MIN_IOS 256
50#define MIN_POOL_PAGES 32
51#define KEY_SIZE_XTS 32
52#define AES_XTS_IV_LEN 16
53#define MAX_MSM_ICE_KEY_LUT_SIZE 32
54#define SECTOR_SIZE 512
55#define MIN_CRYPTO_TRANSFER_SIZE (4 * 1024)
56
57#define DM_REQ_CRYPT_ERROR -1
58#define DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC -2
59
60/*
61 * ENCRYPTION_MODE_CRYPTO means dm-req-crypt would invoke crypto operations
62 * for all of the requests. Crypto operations are performed by a crypto engine
63 * plugged into the Linux kernel crypto API.
64 */
65#define DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO 0
66/*
67 * ENCRYPTION_MODE_TRANSPARENT means dm-req-crypt would not invoke crypto
68 * operations for any of the requests. Data would be encrypted or decrypted
69 * using the Inline Crypto Engine (ICE) embedded in the storage hardware.
70 */
71#define DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT 1
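/*
 * The mode is selected in req_crypt_ctr() below: passing "ice" as the
 * optional seventh target argument picks ENCRYPTION_MODE_TRANSPARENT,
 * otherwise the qcrypto-backed ENCRYPTION_MODE_CRYPTO path is used.
 */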
72
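/*
 * Used as the max_hw_sectors limit in configure_qcrypto(); 256 sectors
 * cap each request at 128 KB of data.
 */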
73#define DM_REQ_CRYPT_QUEUE_SIZE 256
74
75struct req_crypt_result {
76 struct completion completion;
77 int err;
78};
79
80#define FDE_KEY_ID 0
81#define PFE_KEY_ID 1
82
83static struct dm_dev *dev;
84static struct kmem_cache *_req_crypt_io_pool;
85static struct kmem_cache *_req_dm_scatterlist_pool;
86static sector_t start_sector_orig;
87static struct workqueue_struct *req_crypt_queue;
88static struct workqueue_struct *req_crypt_split_io_queue;
89static mempool_t *req_io_pool;
90static mempool_t *req_page_pool;
91static mempool_t *req_scatterlist_pool;
92static bool is_fde_enabled;
93static struct crypto_skcipher *tfm;
94static unsigned int encryption_mode;
95static struct ice_crypto_setting *ice_settings;
96
97unsigned int num_engines;
98unsigned int num_engines_fde, fde_cursor;
99unsigned int num_engines_pfe, pfe_cursor;
100struct crypto_engine_entry *fde_eng, *pfe_eng;
101DEFINE_MUTEX(engine_list_mutex);
102
103struct req_dm_crypt_io {
104 struct ice_crypto_setting ice_settings;
105 struct work_struct work;
106 struct request *cloned_request;
107 int error;
108 atomic_t pending;
109 struct timespec start_time;
110 bool should_encrypt;
111 bool should_decrypt;
112 u32 key_id;
113};
114
115struct req_dm_split_req_io {
116 struct work_struct work;
117 struct scatterlist *req_split_sg_read;
118 struct req_crypt_result result;
119 struct crypto_engine_entry *engine;
120 u8 IV[AES_XTS_IV_LEN];
121 int size;
122 struct request *clone;
123};
124
125#ifdef CONFIG_FIPS_ENABLE
126static struct qcrypto_func_set dm_qcrypto_func;
127#else
128static struct qcrypto_func_set dm_qcrypto_func = {
129 qcrypto_cipher_set_device_hw,
130 qcrypto_cipher_set_flag,
131 qcrypto_get_num_engines,
132 qcrypto_get_engine_list
133};
134#endif
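/*
 * In the non-FIPS build the qcrypto helpers are bound statically above;
 * with CONFIG_FIPS_ENABLE the table starts out empty and is expected to
 * be filled in through set_qcrypto_func_dm(), exported at the bottom of
 * this file.
 */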
135static void req_crypt_cipher_complete
136 (struct crypto_async_request *req, int err);
137static void req_cryptd_split_req_queue_cb
138 (struct work_struct *work);
139static void req_cryptd_split_req_queue
140 (struct req_dm_split_req_io *io);
141static void req_crypt_split_io_complete
142 (struct req_crypt_result *res, int err);
143
144static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req)
145{
146 int ret = 0;
147 bool should_encrypt = false;
148 struct bio *bio = NULL;
149 bool is_encrypted = false;
150 bool is_inplace = false;
151
152 if (!req || !req->cloned_request || !req->cloned_request->bio)
153 return false;
154
155 if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
156 return false;
157 bio = req->cloned_request->bio;
158
159 /* req->key_id = key_id; @todo support more than 1 pfe key */
160 if ((ret == 0) && (is_encrypted || is_inplace)) {
161 should_encrypt = true;
162 req->key_id = PFE_KEY_ID;
163 } else if (is_fde_enabled) {
164 should_encrypt = true;
165 req->key_id = FDE_KEY_ID;
166 }
167
168 return should_encrypt;
169}
170
171static bool req_crypt_should_deccrypt(struct req_dm_crypt_io *req)
172{
173 int ret = 0;
174 bool should_deccrypt = false;
175 struct bio *bio = NULL;
176 bool is_encrypted = false;
177 bool is_inplace = false;
178
179 if (!req || !req->cloned_request || !req->cloned_request->bio)
180 return false;
181 if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
182 return false;
183
184 bio = req->cloned_request->bio;
185
186 /* req->key_id = key_id; @todo support more than 1 pfe key */
187 if ((ret == 0) && (is_encrypted && !is_inplace)) {
188 should_deccrypt = true;
189 req->key_id = PFE_KEY_ID;
190 } else if (is_fde_enabled) {
191 should_deccrypt = true;
192 req->key_id = FDE_KEY_ID;
193 }
194
195 return should_deccrypt;
196}
197
198static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
199{
200 atomic_inc(&io->pending);
201}
202
203static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io)
204{
205 int error = 0;
206 struct request *clone = NULL;
207
208 if (io) {
209 error = io->error;
210 if (io->cloned_request) {
211 clone = io->cloned_request;
212 } else {
213 DMERR("%s io->cloned_request is NULL\n",
214 __func__);
215 /*
216 * If Clone is NULL we cannot do anything,
217 * this should never happen
218 */
219 WARN_ON(1);
220 }
221 } else {
222 DMERR("%s io is NULL\n", __func__);
223 /*
224 * If io is NULL we cannot do anything,
225 * this should never happen
226 */
227 WARN_ON(1);
228 }
229
230 atomic_dec(&io->pending);
231
232 if (error < 0) {
233 dm_kill_unmapped_request(clone, error);
234 mempool_free(io, req_io_pool);
235 } else
236 dm_dispatch_request(clone);
237}
238
239static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io)
240{
241 int error = 0;
242 struct request *clone = NULL;
243
244 if (io) {
245 error = io->error;
246 if (io->cloned_request) {
247 clone = io->cloned_request;
248 } else {
249 DMERR("%s io->cloned_request is NULL\n",
250 __func__);
251 /*
252 * If Clone is NULL we cannot do anything,
253 * this should never happen
254 */
255 WARN_ON(1);
256 }
257 } else {
258 DMERR("%s io is NULL\n",
259 __func__);
260 /*
261 * If io is NULL we cannot do anything,
262 * this should never happen
263 */
264 WARN_ON(1);
265 }
266
267 /* Should never get here if io or Clone is NULL */
268 dm_end_request(clone, error);
269 atomic_dec(&io->pending);
270 mempool_free(io, req_io_pool);
271}
272
273/*
274 * The callback that will be called by the worker queue to perform Decryption
275 * for reads and use the dm function to complete the bios and requests.
276 */
277static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
278{
279 struct request *clone = NULL;
280 int error = DM_REQ_CRYPT_ERROR;
281 int total_sg_len = 0, total_bytes_in_req = 0, temp_size = 0, i = 0;
282 struct scatterlist *sg = NULL;
283 struct scatterlist *req_sg_read = NULL;
284
285 unsigned int engine_list_total = 0;
286 struct crypto_engine_entry *curr_engine_list = NULL;
287 bool split_transfers = 0;
288 sector_t tempiv;
289 struct req_dm_split_req_io *split_io = NULL;
290
291 if (io) {
292 error = io->error;
293 if (io->cloned_request) {
294 clone = io->cloned_request;
295 } else {
296 DMERR("%s io->cloned_request is NULL\n",
297 __func__);
298 error = DM_REQ_CRYPT_ERROR;
299 goto submit_request;
300 }
301 } else {
302 DMERR("%s io is NULL\n",
303 __func__);
304 error = DM_REQ_CRYPT_ERROR;
305 goto submit_request;
306 }
307
308 req_crypt_inc_pending(io);
309
310 mutex_lock(&engine_list_mutex);
311
312 engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde :
313 (io->key_id == PFE_KEY_ID ?
314 num_engines_pfe : 0));
315
316 curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng :
317 (io->key_id == PFE_KEY_ID ?
318 pfe_eng : NULL));
319
320 mutex_unlock(&engine_list_mutex);
321
322 req_sg_read = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
323 GFP_KERNEL);
324 if (!req_sg_read) {
325 DMERR("%s req_sg_read allocation failed\n",
326 __func__);
327 error = DM_REQ_CRYPT_ERROR;
328 goto skcipher_req_alloc_failure;
329 }
330 memset(req_sg_read, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
331
332 total_sg_len = blk_rq_map_sg_no_cluster(clone->q, clone, req_sg_read);
333 if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
334 DMERR("%s Request Error %d", __func__, total_sg_len);
335 error = DM_REQ_CRYPT_ERROR;
336 goto skcipher_req_alloc_failure;
337 }
338
339 total_bytes_in_req = clone->__data_len;
340 if (total_bytes_in_req > REQ_DM_512_KB) {
341 DMERR("%s total_bytes_in_req > 512 KB %d",
342 __func__, total_bytes_in_req);
343 error = DM_REQ_CRYPT_ERROR;
344 goto skcipher_req_alloc_failure;
345 }
346
347
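/*
 * Split the read across the available engines only when every engine
 * would get at least MIN_CRYPTO_TRANSFER_SIZE of data, e.g. with two
 * engines a request has to be at least 2 * 4 KB = 8 KB to be split.
 */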
348 if ((clone->__data_len >= (MIN_CRYPTO_TRANSFER_SIZE *
349 engine_list_total))
350 && (engine_list_total > 1))
351 split_transfers = 1;
352
353 if (split_transfers) {
354 split_io = kzalloc(sizeof(struct req_dm_split_req_io)
355 * engine_list_total, GFP_KERNEL);
356 if (!split_io) {
357 DMERR("%s split_io allocation failed\n", __func__);
358 error = DM_REQ_CRYPT_ERROR;
359 goto skcipher_req_alloc_failure;
360 }
361
362 split_io[0].req_split_sg_read = sg = req_sg_read;
363 split_io[engine_list_total - 1].size = total_bytes_in_req;
364 for (i = 0; i < (engine_list_total); i++) {
365 while ((sg) && i < (engine_list_total - 1)) {
366 split_io[i].size += sg->length;
367 split_io[engine_list_total - 1].size -=
368 sg->length;
369 if (split_io[i].size >=
370 (total_bytes_in_req /
371 engine_list_total)) {
372 split_io[i + 1].req_split_sg_read =
373 sg_next(sg);
374 sg_mark_end(sg);
375 break;
376 }
377 sg = sg_next(sg);
378 }
379 split_io[i].engine = &curr_engine_list[i];
380 init_completion(&split_io[i].result.completion);
381 memset(&split_io[i].IV, 0, AES_XTS_IV_LEN);
382 tempiv = clone->__sector + (temp_size / SECTOR_SIZE);
383 memcpy(&split_io[i].IV, &tempiv, sizeof(sector_t));
384 temp_size += split_io[i].size;
385 split_io[i].clone = clone;
386 req_cryptd_split_req_queue(&split_io[i]);
387 }
388 } else {
389 split_io = kzalloc(sizeof(struct req_dm_split_req_io),
390 GFP_KERNEL);
391 if (!split_io) {
392 DMERR("%s split_io allocation failed\n", __func__);
393 error = DM_REQ_CRYPT_ERROR;
394 goto skcipher_req_alloc_failure;
395 }
396 split_io->engine = &curr_engine_list[0];
397 init_completion(&split_io->result.completion);
398 memcpy(split_io->IV, &clone->__sector, sizeof(sector_t));
399 split_io->req_split_sg_read = req_sg_read;
400 split_io->size = total_bytes_in_req;
401 split_io->clone = clone;
402 req_cryptd_split_req_queue(split_io);
403 }
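/*
 * The XTS IV for each (sub-)request is the starting sector of the data
 * it covers, copied into the first sizeof(sector_t) bytes of the zeroed
 * 16-byte IV. For a split request the i-th chunk starts at
 * clone->__sector + (bytes already assigned) / SECTOR_SIZE, so e.g. a
 * request at sector 1000 whose first chunk carries 64 KB gives the
 * second chunk an IV of sector 1128.
 */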
404
405 if (!split_transfers) {
406 wait_for_completion_interruptible(&split_io->result.completion);
407 if (split_io->result.err) {
408 DMERR("%s error = %d for request\n",
409 __func__, split_io->result.err);
410 error = DM_REQ_CRYPT_ERROR;
411 goto skcipher_req_alloc_failure;
412 }
413 } else {
414 for (i = 0; i < (engine_list_total); i++) {
415 wait_for_completion_interruptible(
416 &split_io[i].result.completion);
417 if (split_io[i].result.err) {
418 DMERR("%s error = %d for split request %d\n",
419 __func__, split_io[i].result.err, i);
420 error = DM_REQ_CRYPT_ERROR;
421 goto skcipher_req_alloc_failure;
422 }
423 }
424 }
425 error = 0;
426skcipher_req_alloc_failure:
427
428 mempool_free(req_sg_read, req_scatterlist_pool);
429 kfree(split_io);
430submit_request:
431 if (io)
432 io->error = error;
433 req_crypt_dec_pending_decrypt(io);
434}
435
436/*
437 * This callback is called by the worker queue to perform non-decrypt reads
438 * and use the dm function to complete the bios and requests.
439 */
440static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io)
441{
442 struct request *clone = NULL;
443 int error = 0;
444
445 if (!io || !io->cloned_request) {
446 DMERR("%s io is invalid\n", __func__);
447 WARN_ON(1); /* should not happen */
448 }
449
450 clone = io->cloned_request;
451
452 dm_end_request(clone, error);
453 mempool_free(io, req_io_pool);
454}
455
456/*
457 * The callback that will be called by the worker queue to perform Encryption
458 * for writes and submit the request using the elevator.
459 */
460static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
461{
462 struct request *clone = NULL;
463 struct bio *bio_src = NULL;
464 unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
465 total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
466 struct req_iterator iter;
467 struct req_iterator iter1;
468 struct skcipher_request *req = NULL;
469 struct req_crypt_result result;
470 struct bio_vec bvec;
471 struct scatterlist *req_sg_in = NULL;
472 struct scatterlist *req_sg_out = NULL;
473 int copy_bio_sector_to_req = 0;
474 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
475 struct page *page = NULL;
476 u8 IV[AES_XTS_IV_LEN];
477 int remaining_size = 0, err = 0;
478 struct crypto_engine_entry engine;
479 unsigned int engine_list_total = 0;
480 struct crypto_engine_entry *curr_engine_list = NULL;
481 unsigned int *engine_cursor = NULL;
482
483
484 if (io) {
485 if (io->cloned_request) {
486 clone = io->cloned_request;
487 } else {
488 DMERR("%s io->cloned_request is NULL\n",
489 __func__);
490 error = DM_REQ_CRYPT_ERROR;
491 goto submit_request;
492 }
493 } else {
494 DMERR("%s io is NULL\n",
495 __func__);
496 error = DM_REQ_CRYPT_ERROR;
497 goto submit_request;
498 }
499
500 req_crypt_inc_pending(io);
501
502 req = skcipher_request_alloc(tfm, GFP_KERNEL);
503 if (!req) {
504 DMERR("%s skcipher request allocation failed\n",
505 __func__);
506 error = DM_REQ_CRYPT_ERROR;
507 goto skcipher_req_alloc_failure;
508 }
509
510 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
511 req_crypt_cipher_complete, &result);
512
513 mutex_lock(&engine_list_mutex);
514 engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde :
515 (io->key_id == PFE_KEY_ID ?
516 num_engines_pfe : 0));
517
518 curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng :
519 (io->key_id == PFE_KEY_ID ?
520 pfe_eng : NULL));
521
522 engine_cursor = (io->key_id == FDE_KEY_ID ? &fde_cursor :
523 (io->key_id == PFE_KEY_ID ? &pfe_cursor
524 : NULL));
525 if ((engine_list_total < 1) || (curr_engine_list == NULL) ||
526 (engine_cursor == NULL)) {
527 DMERR("%s Unknown Key ID!\n", __func__);
528 error = DM_REQ_CRYPT_ERROR;
529 mutex_unlock(&engine_list_mutex);
530 goto skcipher_req_alloc_failure;
531 }
532
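/*
 * Pick the engine for this request round-robin from the list that
 * matches its key (FDE or PFE) so writes are spread across all the
 * crypto engines available for that key.
 */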
533 engine = curr_engine_list[*engine_cursor];
534 (*engine_cursor)++;
535 (*engine_cursor) %= engine_list_total;
536
537 err = (dm_qcrypto_func.cipher_set)(req, engine.ce_device,
538 engine.hw_instance);
539 if (err) {
540 DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
541 __func__, err);
542 mutex_unlock(&engine_list_mutex);
543 goto skcipher_req_alloc_failure;
544 }
545 mutex_unlock(&engine_list_mutex);
546
547 init_completion(&result.completion);
548
549 (dm_qcrypto_func.cipher_flag)(req,
550 QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
551 crypto_skcipher_clear_flags(tfm, ~0);
552 crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
553
554 req_sg_in = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
555 GFP_KERNEL);
556 if (!req_sg_in) {
557 DMERR("%s req_sg_in allocation failed\n",
558 __func__);
559 error = DM_REQ_CRYPT_ERROR;
560 goto skcipher_req_alloc_failure;
561 }
562 memset(req_sg_in, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
563
564 req_sg_out = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
565 GFP_KERNEL);
566 if (!req_sg_out) {
567 DMERR("%s req_sg_out allocation failed\n",
568 __func__);
569 error = DM_REQ_CRYPT_ERROR;
AnilKumar Chimataddc48122017-06-23 03:12:57 -0700570 goto skcipher_req_alloc_failure;
AnilKumar Chimata7214d7e2017-06-23 03:09:59 -0700571 }
572 memset(req_sg_out, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
573
574 total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in);
575 if ((total_sg_len_req_in <= 0) ||
576 (total_sg_len_req_in > MAX_SG_LIST)) {
577 DMERR("%s Request Error %d", __func__, total_sg_len_req_in);
578 error = DM_REQ_CRYPT_ERROR;
579 goto skcipher_req_alloc_failure;
580 }
581
582 total_bytes_in_req = clone->__data_len;
583 if (total_bytes_in_req > REQ_DM_512_KB) {
584 DMERR("%s total_bytes_in_req > 512 KB %d",
585 __func__, total_bytes_in_req);
586 error = DM_REQ_CRYPT_ERROR;
587 goto skcipher_req_alloc_failure;
588 }
589
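/*
 * Swap pages allocated from req_page_pool into the write payload so the
 * ciphertext lands in driver-owned pages rather than the caller's
 * plaintext pages; they are released again in req_crypt_endio() or in
 * the error path below.
 */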
590 rq_for_each_segment(bvec, clone, iter) {
591 if (bvec.bv_len > remaining_size) {
592 page = NULL;
593 while (page == NULL) {
594 page = mempool_alloc(req_page_pool, gfp_mask);
595 if (!page) {
596 DMERR("%s Crypt page alloc failed",
597 __func__);
598 congestion_wait(BLK_RW_ASYNC, HZ/100);
599 }
600 }
601
602 bvec.bv_page = page;
603 bvec.bv_offset = 0;
604 remaining_size = PAGE_SIZE - bvec.bv_len;
605 if (remaining_size < 0)
606 WARN_ON(1);
607 } else {
608 bvec.bv_page = page;
609 bvec.bv_offset = PAGE_SIZE - remaining_size;
610 remaining_size = remaining_size - bvec.bv_len;
611 }
612 }
613
614 total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out);
615 if ((total_sg_len_req_out <= 0) ||
616 (total_sg_len_req_out > MAX_SG_LIST)) {
617 DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
618 error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
619 goto skcipher_req_alloc_failure;
620 }
621
622 memset(IV, 0, AES_XTS_IV_LEN);
623 memcpy(IV, &clone->__sector, sizeof(sector_t));
624
625 skcipher_request_set_crypt(req, req_sg_in, req_sg_out,
626 total_bytes_in_req, (void *) IV);
627
628 rc = crypto_skcipher_encrypt(req);
629
630 switch (rc) {
631 case 0:
632 break;
633
634 case -EBUSY:
635 /*
636 * Let's make this a synchronous request by waiting on
637 * the in-progress completion as well
638 */
639 case -EINPROGRESS:
640 wait_for_completion_interruptible(&result.completion);
641 if (result.err) {
642 DMERR("%s error = %d encrypting the request\n",
643 __func__, result.err);
644 error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
645 goto skcipher_req_alloc_failure;
646 }
647 break;
648
649 default:
650 error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
651 goto skcipher_req_alloc_failure;
652 }
653
654 __rq_for_each_bio(bio_src, clone) {
655 if (copy_bio_sector_to_req == 0)
656 copy_bio_sector_to_req++;
657 blk_queue_bounce(clone->q, &bio_src);
658 }
659
660 /*
661 * Recalculate the phy_segments as we allocate new pages
662 * This is used by storage driver to fill the sg list.
663 */
664 blk_recalc_rq_segments(clone);
665
666skcipher_req_alloc_failure:
667 if (req)
668 skcipher_request_free(req);
669
670 if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) {
671 rq_for_each_segment(bvec, clone, iter1) {
672 if (bvec.bv_offset == 0) {
673 mempool_free(bvec.bv_page, req_page_pool);
674 bvec.bv_page = NULL;
675 } else
676 bvec.bv_page = NULL;
677 }
678 }
679
680 mempool_free(req_sg_in, req_scatterlist_pool);
681 mempool_free(req_sg_out, req_scatterlist_pool);
682submit_request:
683 if (io)
684 io->error = error;
685 req_crypt_dec_pending_encrypt(io);
686}
687
688/*
689 * This callback is called by the worker queue to perform non-encrypted writes
690 * and submit the request using the elevator.
691 */
692static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io)
693{
694 struct request *clone = NULL;
695
696 if (!io || !io->cloned_request) {
697 DMERR("%s io is invalid\n", __func__);
698 WARN_ON(1); /* should not happen */
699 }
700
701 clone = io->cloned_request;
702 io->error = 0;
703 dm_dispatch_request(clone);
704}
705
706/* Queue callback function that will get triggered */
707static void req_cryptd_crypt(struct work_struct *work)
708{
709 struct req_dm_crypt_io *io =
710 container_of(work, struct req_dm_crypt_io, work);
711
712 if (rq_data_dir(io->cloned_request) == WRITE) {
713 if (io->should_encrypt)
714 req_cryptd_crypt_write_convert(io);
715 else
716 req_cryptd_crypt_write_plain(io);
717 } else if (rq_data_dir(io->cloned_request) == READ) {
718 if (io->should_decrypt)
719 req_cryptd_crypt_read_convert(io);
720 else
721 req_cryptd_crypt_read_plain(io);
722 } else {
723 DMERR("%s received a non-read, non-write request for Clone 0x%p\n",
724 __func__, io->cloned_request);
725 }
726}
727
728static void req_cryptd_split_req_queue_cb(struct work_struct *work)
729{
730 struct req_dm_split_req_io *io =
731 container_of(work, struct req_dm_split_req_io, work);
732 struct skcipher_request *req = NULL;
733 struct req_crypt_result result;
734 int err = 0;
735 struct crypto_engine_entry *engine = NULL;
736
737 if ((!io) || (!io->req_split_sg_read) || (!io->engine)) {
738 DMERR("%s Input invalid\n",
739 __func__);
740 err = DM_REQ_CRYPT_ERROR;
741 /* If io is not populated this should not be called */
742 WARN_ON(1);
743 }
744 req = skcipher_request_alloc(tfm, GFP_KERNEL);
745 if (!req) {
746 DMERR("%s skcipher request allocation failed\n", __func__);
747 err = DM_REQ_CRYPT_ERROR;
748 goto skcipher_req_alloc_failure;
749 }
750
751 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
752 req_crypt_cipher_complete, &result);
753
754 engine = io->engine;
755
756 err = (dm_qcrypto_func.cipher_set)(req, engine->ce_device,
757 engine->hw_instance);
758 if (err) {
759 DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
760 __func__, err);
761 goto skcipher_req_alloc_failure;
762 }
763 init_completion(&result.completion);
764 (dm_qcrypto_func.cipher_flag)(req,
765 QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
766
767 crypto_skcipher_clear_flags(tfm, ~0);
768 crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
769
770 skcipher_request_set_crypt(req, io->req_split_sg_read,
771 io->req_split_sg_read, io->size, (void *) io->IV);
772
773 err = crypto_skcipher_decrypt(req);
774 switch (err) {
775 case 0:
776 break;
777
778 case -EBUSY:
779 /*
780 * Let's make this a synchronous request by waiting on
781 * the in-progress completion as well
782 */
783 case -EINPROGRESS:
784 wait_for_completion_io(&result.completion);
785 if (result.err) {
786 DMERR("%s error = %d decrypting the request\n",
787 __func__, result.err);
788 err = DM_REQ_CRYPT_ERROR;
789 goto skcipher_req_alloc_failure;
790 }
791 break;
792
793 default:
794 err = DM_REQ_CRYPT_ERROR;
795 goto skcipher_req_alloc_failure;
796 }
797 err = 0;
798skcipher_req_alloc_failure:
799 if (req)
800 skcipher_request_free(req);
801
802 req_crypt_split_io_complete(&io->result, err);
803}
804
805static void req_cryptd_split_req_queue(struct req_dm_split_req_io *io)
806{
807 INIT_WORK(&io->work, req_cryptd_split_req_queue_cb);
808 queue_work(req_crypt_split_io_queue, &io->work);
809}
810
811static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
812{
813 INIT_WORK(&io->work, req_cryptd_crypt);
814 queue_work(req_crypt_queue, &io->work);
815}
816
817/*
818 * Cipher complete callback, this is triggered by the Linux crypto api once
819 * the operation is done. This signals the waiting thread that the crypto
820 * operation is complete.
821 */
822static void req_crypt_cipher_complete(struct crypto_async_request *req, int err)
823{
824 struct req_crypt_result *res = req->data;
825
826 if (err == -EINPROGRESS)
827 return;
828
829 res->err = err;
830 complete(&res->completion);
831}
832
833static void req_crypt_split_io_complete(struct req_crypt_result *res, int err)
834{
835 if (err == -EINPROGRESS)
836 return;
837
838 res->err = err;
839 complete(&res->completion);
840}
841/*
842 * If bio->bi_dev is a partition, remap the location
843 */
844static inline void req_crypt_blk_partition_remap(struct bio *bio)
845{
846 struct block_device *bdev = bio->bi_bdev;
847
848 if (bio_sectors(bio) && bdev != bdev->bd_contains) {
849 struct hd_struct *p = bdev->bd_part;
850 /*
851 * Check for integer overflow, should never happen.
852 */
853 if (p->start_sect > (UINT_MAX - bio->bi_iter.bi_sector))
854 WARN_ON(1);
855
856 bio->bi_iter.bi_sector += p->start_sect;
857 bio->bi_bdev = bdev->bd_contains;
858 }
859}
860
861/*
862 * The endio function is called from ksoftirqd context (atomic).
863 * For write operations, the new pages created from the mempool
864 * are freed and returned. For read operations, decryption is
865 * required; since this is called in an atomic context, the
866 * request is sent to a worker queue to complete decryption and
867 * free the request once done.
868 */
869static int req_crypt_endio(struct dm_target *ti, struct request *clone,
870 int error, union map_info *map_context)
871{
872 int err = 0;
873 struct req_iterator iter1;
874 struct bio_vec bvec;
875 struct req_dm_crypt_io *req_io = map_context->ptr;
876
877 /* If it is for ICE, free up req_io and return */
878 if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
879 mempool_free(req_io, req_io_pool);
880 err = error;
881 goto submit_request;
882 }
883
884 if (rq_data_dir(clone) == WRITE) {
885 rq_for_each_segment(bvec, clone, iter1) {
886 if (req_io->should_encrypt && bvec.bv_offset == 0) {
887 mempool_free(bvec.bv_page, req_page_pool);
888 bvec.bv_page = NULL;
889 } else
890 bvec.bv_page = NULL;
891 }
892 mempool_free(req_io, req_io_pool);
893 goto submit_request;
894 } else if (rq_data_dir(clone) == READ) {
895 req_io->error = error;
896 req_cryptd_queue_crypt(req_io);
897 err = DM_ENDIO_INCOMPLETE;
898 goto submit_request;
899 }
900
901submit_request:
902 return err;
903}
904
905/*
906 * This function is called with interrupts disabled.
907 * It remaps the clone for the underlying device.
908 * If it is a write request, it calls into the worker queue to
909 * encrypt the data
910 * and submit the request directly using the elevator.
911 * For a read request no pre-processing is required; the request
912 * is returned to dm once mapping is done.
913 */
914static int req_crypt_map(struct dm_target *ti, struct request *clone,
915 union map_info *map_context)
916{
917 struct req_dm_crypt_io *req_io = NULL;
918 int error = DM_REQ_CRYPT_ERROR, copy_bio_sector_to_req = 0;
919 struct bio *bio_src = NULL;
920 gfp_t gfp_flag = GFP_KERNEL;
921
922 if (in_interrupt() || irqs_disabled())
923 gfp_flag = GFP_NOWAIT;
924
925 req_io = mempool_alloc(req_io_pool, gfp_flag);
926 if (!req_io) {
927 WARN_ON(1);
928 error = DM_REQ_CRYPT_ERROR;
929 goto submit_request;
930 }
931
932 /* Save the clone in the req_io, the callback to the worker
933 * queue will get the req_io
934 */
935 req_io->cloned_request = clone;
936 map_context->ptr = req_io;
937 atomic_set(&req_io->pending, 0);
938
939 if (rq_data_dir(clone) == WRITE)
940 req_io->should_encrypt = req_crypt_should_encrypt(req_io);
941 if (rq_data_dir(clone) == READ)
942 req_io->should_decrypt = req_crypt_should_deccrypt(req_io);
943
944 /* Get the queue of the underlying original device */
945 clone->q = bdev_get_queue(dev->bdev);
946 clone->rq_disk = dev->bdev->bd_disk;
947
948 __rq_for_each_bio(bio_src, clone) {
949 bio_src->bi_bdev = dev->bdev;
950 /* Currently the way req-dm works is that the underlying
951 * device driver completes the request by calling into the
952 * block layer; the block layer completes the bios (clones) and
953 * then the cloned request. This is undesirable for req-dm-crypt
954 * hence added a flag BIO_DONTFREE, this flag will ensure that
955 * blk layer does not complete the cloned bios before completing
956 * the request. When the crypt endio is called, post-processing
957 * is done and then the dm layer will complete the bios (clones)
958 * and free them.
959 */
960 if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
961 bio_src->bi_flags |= 1 << BIO_INLINECRYPT;
962 else
963 bio_src->bi_flags |= 1 << BIO_DONTFREE;
964
965 /*
966 * If this device has partitions, remap block n
967 * of partition p to block n+start(p) of the disk.
968 */
969 req_crypt_blk_partition_remap(bio_src);
970 if (copy_bio_sector_to_req == 0) {
971 clone->__sector = bio_src->bi_iter.bi_sector;
972 copy_bio_sector_to_req++;
973 }
974 blk_queue_bounce(clone->q, &bio_src);
975 }
976
977 if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
978 /* Set all crypto parameters for inline crypto engine */
979 memcpy(&req_io->ice_settings, ice_settings,
980 sizeof(struct ice_crypto_setting));
981 } else {
982 /* ICE checks for key_index which could be >= 0. If a chip has
983 * both ICE and GPCE and wanted to use GPCE, there could be
984 * issue. Storage driver send all requests to ICE driver. If
985 * it sees key_index as 0, it would assume it is for ICE while
986 * it is not. Hence set invalid key index by default.
987 */
988 req_io->ice_settings.key_index = -1;
989
990 }
991
992 if (rq_data_dir(clone) == READ ||
993 encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
994 error = DM_MAPIO_REMAPPED;
995 goto submit_request;
996 } else if (rq_data_dir(clone) == WRITE) {
997 req_cryptd_queue_crypt(req_io);
998 error = DM_MAPIO_SUBMITTED;
999 goto submit_request;
1000 }
1001
1002submit_request:
1003 return error;
1004
1005}
1006
1007static void deconfigure_qcrypto(void)
1008{
1009 mempool_destroy(req_page_pool);
1010 req_page_pool = NULL;
1011
1012 mempool_destroy(req_scatterlist_pool);
1013 req_scatterlist_pool = NULL;
1014
1015 if (req_crypt_split_io_queue) {
1016 destroy_workqueue(req_crypt_split_io_queue);
1017 req_crypt_split_io_queue = NULL;
1018 }
1019 if (req_crypt_queue) {
1020 destroy_workqueue(req_crypt_queue);
1021 req_crypt_queue = NULL;
1022 }
1023
1024 kmem_cache_destroy(_req_dm_scatterlist_pool);
1025
1026 mutex_lock(&engine_list_mutex);
1027 kfree(pfe_eng);
1028 pfe_eng = NULL;
1029 kfree(fde_eng);
1030 fde_eng = NULL;
1031 mutex_unlock(&engine_list_mutex);
1032
1033 if (tfm) {
1034 crypto_free_skcipher(tfm);
1035 tfm = NULL;
1036 }
1037}
1038
1039static void req_crypt_dtr(struct dm_target *ti)
1040{
1041 DMDEBUG("dm-req-crypt Destructor.\n");
1042
1043 mempool_destroy(req_io_pool);
1044 req_io_pool = NULL;
1045
1046 if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
1047 kfree(ice_settings);
1048 ice_settings = NULL;
1049 } else {
1050 deconfigure_qcrypto();
1051 }
1052
1053 kmem_cache_destroy(_req_crypt_io_pool);
1054
1055 if (dev) {
1056 dm_put_device(ti, dev);
1057 dev = NULL;
1058 }
1059}
1060
1061static int configure_qcrypto(void)
1062{
1063 struct crypto_engine_entry *eng_list = NULL;
1064 struct block_device *bdev = NULL;
1065 int err = DM_REQ_CRYPT_ERROR, i;
1066 struct request_queue *q = NULL;
1067
1068 bdev = dev->bdev;
1069 q = bdev_get_queue(bdev);
1070 blk_queue_max_hw_sectors(q, DM_REQ_CRYPT_QUEUE_SIZE);
1071
1072 /* Allocate the crypto alloc blk cipher and keep the handle */
1073 tfm = crypto_alloc_skcipher("qcom-xts(aes)", 0, 0);
1074 if (IS_ERR(tfm)) {
1075 DMERR("%s skcipher tfm allocation failed\n",
1076 __func__);
1077 tfm = NULL;
1078 goto exit_err;
1079 }
1080
1081 num_engines_fde = num_engines_pfe = 0;
1082
1083 mutex_lock(&engine_list_mutex);
1084 num_engines = (dm_qcrypto_func.get_num_engines)();
1085 if (!num_engines) {
1086 DMERR("%s qcrypto_get_num_engines failed\n",
1087 __func__);
1088 err = DM_REQ_CRYPT_ERROR;
1089 mutex_unlock(&engine_list_mutex);
1090 goto exit_err;
1091 }
1092
1093 eng_list = kcalloc(num_engines, sizeof(*eng_list), GFP_KERNEL);
1094 if (eng_list == NULL) {
1095 DMERR("%s engine list allocation failed\n", __func__);
1096 err = DM_REQ_CRYPT_ERROR;
1097 mutex_unlock(&engine_list_mutex);
1098 goto exit_err;
1099 }
1100
1101 (dm_qcrypto_func.get_engine_list)(num_engines, eng_list);
1102
1103 for (i = 0; i < num_engines; i++) {
1104 if (eng_list[i].ce_device == FDE_KEY_ID)
1105 num_engines_fde++;
1106 if (eng_list[i].ce_device == PFE_KEY_ID)
1107 num_engines_pfe++;
1108 }
1109
1110 fde_eng = kcalloc(num_engines_fde, sizeof(*fde_eng), GFP_KERNEL);
1111 if (fde_eng == NULL) {
1112 DMERR("%s fde engine list allocation failed\n", __func__);
1113 mutex_unlock(&engine_list_mutex);
1114 goto exit_err;
1115 }
1116
1117 pfe_eng = kcalloc(num_engines_pfe, sizeof(*pfe_eng), GFP_KERNEL);
1118 if (pfe_eng == NULL) {
1119 DMERR("%s pfe engine list allocation failed\n", __func__);
1120 mutex_unlock(&engine_list_mutex);
1121 goto exit_err;
1122 }
1123
1124 fde_cursor = 0;
1125 pfe_cursor = 0;
1126
1127 for (i = 0; i < num_engines; i++) {
1128 if (eng_list[i].ce_device == FDE_KEY_ID)
1129 fde_eng[fde_cursor++] = eng_list[i];
1130 if (eng_list[i].ce_device == PFE_KEY_ID)
1131 pfe_eng[pfe_cursor++] = eng_list[i];
1132 }
1133
1134 fde_cursor = 0;
1135 pfe_cursor = 0;
1136 mutex_unlock(&engine_list_mutex);
1137
1138 _req_dm_scatterlist_pool = kmem_cache_create("req_dm_scatterlist",
1139 sizeof(struct scatterlist) * MAX_SG_LIST,
1140 __alignof__(struct scatterlist), 0, NULL);
1141 if (!_req_dm_scatterlist_pool)
1142 goto exit_err;
1143
1144 req_crypt_queue = alloc_workqueue("req_cryptd",
1145 WQ_UNBOUND |
1146 WQ_CPU_INTENSIVE |
1147 WQ_MEM_RECLAIM,
1148 0);
1149 if (!req_crypt_queue) {
1150 DMERR("%s req_crypt_queue not allocated\n", __func__);
1151 goto exit_err;
1152 }
1153
1154 req_crypt_split_io_queue = alloc_workqueue("req_crypt_split",
1155 WQ_UNBOUND |
1156 WQ_CPU_INTENSIVE |
1157 WQ_MEM_RECLAIM,
1158 0);
1159 if (!req_crypt_split_io_queue) {
1160 DMERR("%s req_crypt_split_io_queue not allocated\n", __func__);
1161 goto exit_err;
1162 }
1163 req_scatterlist_pool = mempool_create_slab_pool(MIN_IOS,
1164 _req_dm_scatterlist_pool);
1165 if (!req_scatterlist_pool) {
1166 DMERR("%s req_scatterlist_pool is not allocated\n", __func__);
1167 err = -ENOMEM;
1168 goto exit_err;
1169 }
1170
1171 req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
1172 if (!req_page_pool) {
1173 DMERR("%s req_page_pool not allocated\n", __func__);
1174 goto exit_err;
1175 }
1176
1177 err = 0;
1178
1179exit_err:
1180 kfree(eng_list);
1181 return err;
1182}
1183
1184/*
1185 * Construct an encryption mapping:
1186 * <cipher> <key> <iv_offset> <dev_path> <start>
1187 */
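/*
 * A hypothetical example dmsetup table line (all values are
 * illustrative only, not taken from a real setup):
 *
 *   0 4096000 req-crypt aes-xts 0 0 /dev/block/sda 0 fde_enabled ice
 *
 * argv[3] is the backing device and argv[4] the start sector; the
 * optional argv[5] ("fde_enabled") and argv[6] ("ice") select FDE and
 * the transparent ICE mode, and in ICE mode argv[1] is the key index.
 */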
1188static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1189{
1190 int err = DM_REQ_CRYPT_ERROR;
1191 unsigned long long tmpll;
1192 char dummy;
1193 int ret;
1194
1195 DMDEBUG("dm-req-crypt Constructor.\n");
1196
1197 if (argc < 5) {
1198 DMERR(" %s Not enough args\n", __func__);
1199 err = DM_REQ_CRYPT_ERROR;
1200 goto ctr_exit;
1201 }
1202
1203 if (argv[3]) {
1204 if (dm_get_device(ti, argv[3],
1205 dm_table_get_mode(ti->table), &dev)) {
1206 DMERR(" %s Device Lookup failed\n", __func__);
1207 err = DM_REQ_CRYPT_ERROR;
1208 goto ctr_exit;
1209 }
1210 } else {
1211 DMERR(" %s Arg[3] invalid\n", __func__);
1212 err = DM_REQ_CRYPT_ERROR;
1213 goto ctr_exit;
1214 }
1215
1216 if (argv[4]) {
1217 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
1218 DMERR("%s Invalid device sector\n", __func__);
1219 err = DM_REQ_CRYPT_ERROR;
1220 goto ctr_exit;
1221 }
1222 } else {
1223 DMERR(" %s Arg[4] invalid\n", __func__);
1224 err = DM_REQ_CRYPT_ERROR;
1225 goto ctr_exit;
1226 }
1227 start_sector_orig = tmpll;
1228
1229 /* Allow backward compatible */
1230 if (argc >= 6) {
1231 if (argv[5]) {
1232 if (!strcmp(argv[5], "fde_enabled"))
1233 is_fde_enabled = true;
1234 else
1235 is_fde_enabled = false;
1236 } else {
1237 DMERR(" %s Arg[5] invalid\n", __func__);
1238 err = DM_REQ_CRYPT_ERROR;
1239 goto ctr_exit;
1240 }
1241 } else {
1242 DMERR(" %s Arg[5] missing, set FDE enabled.\n", __func__);
1243 is_fde_enabled = true; /* backward compatible */
1244 }
1245
1246 _req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0);
1247 if (!_req_crypt_io_pool) {
1248 err = DM_REQ_CRYPT_ERROR;
1249 goto ctr_exit;
1250 }
1251
1252 encryption_mode = DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO;
1253 if (argc >= 7 && argv[6]) {
1254 if (!strcmp(argv[6], "ice"))
1255 encryption_mode =
1256 DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT;
1257 }
1258
1259 if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
1260 /* configure ICE settings */
1261 ice_settings =
1262 kzalloc(sizeof(struct ice_crypto_setting), GFP_KERNEL);
1263 if (!ice_settings) {
1264 err = -ENOMEM;
1265 goto ctr_exit;
1266 }
1267 ice_settings->key_size = ICE_CRYPTO_KEY_SIZE_128;
1268 ice_settings->algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
1269 ice_settings->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
1270 if (kstrtou16(argv[1], 0, &ice_settings->key_index) ||
1271 ice_settings->key_index < 0 ||
1272 ice_settings->key_index > MAX_MSM_ICE_KEY_LUT_SIZE) {
1273 DMERR("%s Err: key index %d received for ICE\n",
1274 __func__, ice_settings->key_index);
1275 err = DM_REQ_CRYPT_ERROR;
1276 goto ctr_exit;
1277 }
1278 } else {
1279 ret = configure_qcrypto();
1280 if (ret) {
1281 DMERR("%s failed to configure qcrypto\n", __func__);
1282 err = ret;
1283 goto ctr_exit;
1284 }
1285 }
1286
1287 req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
1288 if (!req_io_pool) {
1289 DMERR("%s req_io_pool not allocated\n", __func__);
1290 err = -ENOMEM;
1291 goto ctr_exit;
1292 }
1293
1294 /*
1295 * If underlying device supports flush/discard, mapped target
1296 * should also allow it
1297 */
1298 ti->num_flush_bios = 1;
1299 ti->num_discard_bios = 1;
1300
1301 err = 0;
1302 DMINFO("%s: Mapping block_device %s to dm-req-crypt ok!\n",
1303 __func__, argv[3]);
1304ctr_exit:
1305 if (err)
1306 req_crypt_dtr(ti);
1307
1308 return err;
1309}
1310
1311static int req_crypt_iterate_devices(struct dm_target *ti,
1312 iterate_devices_callout_fn fn, void *data)
1313{
1314 return fn(ti, dev, start_sector_orig, ti->len, data);
1315}
1316void set_qcrypto_func_dm(void *dev,
1317 void *flag,
1318 void *engines,
1319 void *engine_list)
1320{
1321 dm_qcrypto_func.cipher_set = dev;
1322 dm_qcrypto_func.cipher_flag = flag;
1323 dm_qcrypto_func.get_num_engines = engines;
1324 dm_qcrypto_func.get_engine_list = engine_list;
1325}
1326EXPORT_SYMBOL(set_qcrypto_func_dm);
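/*
 * A caller (e.g. the qcrypto driver in a CONFIG_FIPS_ENABLE build) can
 * hand over its helpers through this hook; a sketch of such a call,
 * reusing the helpers from the static table above, would be:
 *
 *   set_qcrypto_func_dm(qcrypto_cipher_set_device_hw,
 *                       qcrypto_cipher_set_flag,
 *                       qcrypto_get_num_engines,
 *                       qcrypto_get_engine_list);
 */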
1327
1328static struct target_type req_crypt_target = {
1329 .name = "req-crypt",
1330 .version = {1, 0, 0},
1331 .module = THIS_MODULE,
1332 .ctr = req_crypt_ctr,
1333 .dtr = req_crypt_dtr,
1334 .map_rq = req_crypt_map,
1335 .rq_end_io = req_crypt_endio,
1336 .iterate_devices = req_crypt_iterate_devices,
1337};
1338
1339static int __init req_dm_crypt_init(void)
1340{
1341 int r;
1342
1343
1344 r = dm_register_target(&req_crypt_target);
1345 if (r < 0) {
1346 DMERR("register failed %d", r);
1347 return r;
1348 }
1349
1350 DMINFO("dm-req-crypt successfully initialized.\n");
1351
1352 return r;
1353}
1354
1355static void __exit req_dm_crypt_exit(void)
1356{
1357 dm_unregister_target(&req_crypt_target);
1358}
1359
1360module_init(req_dm_crypt_init);
1361module_exit(req_dm_crypt_exit);
1362
1363MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption");
1364MODULE_LICENSE("GPL v2");