md: dm-req-crypt: Fix compilation errors

During the kernel upgrade, a few functions were deprecated
(the ablkcipher API was replaced by the skcipher API), which
results in compilation errors in the dm-req-crypt driver.
This patch fixes those compilation errors.

Change-Id: I8fadd8ca81e35292174199afd3c4508ecabf55b5
Signed-off-by: AnilKumar Chimata <anilc@codeaurora.org>
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
index 56f214a..3ffe7e5 100644
--- a/drivers/md/dm-req-crypt.c
+++ b/drivers/md/dm-req-crypt.c
@@ -33,6 +33,8 @@
 
 #include <asm/page.h>
 #include <asm/unaligned.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/hash.h>
 #include <crypto/md5.h>
@@ -88,7 +90,7 @@
 static mempool_t *req_page_pool;
 static mempool_t *req_scatterlist_pool;
 static bool is_fde_enabled;
-static struct crypto_ablkcipher *tfm;
+static struct crypto_skcipher *tfm;
 static unsigned int encryption_mode;
 static struct ice_crypto_setting *ice_settings;
 
@@ -323,7 +325,7 @@
 		DMERR("%s req_sg_read allocation failed\n",
 						__func__);
 		error = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 	memset(req_sg_read, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
 
@@ -331,7 +333,7 @@
 	if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
 		DMERR("%s Request Error%d", __func__, total_sg_len);
 		error = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 
 	total_bytes_in_req = clone->__data_len;
@@ -339,7 +341,7 @@
 		DMERR("%s total_bytes_in_req > 512 MB %d",
 				__func__, total_bytes_in_req);
 		error = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 
 
@@ -354,7 +356,7 @@
 		if (!split_io) {
 			DMERR("%s split_io allocation failed\n", __func__);
 			error = DM_REQ_CRYPT_ERROR;
-			goto ablkcipher_req_alloc_failure;
+			goto skcipher_req_alloc_failure;
 		}
 
 		split_io[0].req_split_sg_read = sg = req_sg_read;
@@ -389,7 +391,7 @@
 		if (!split_io) {
 			DMERR("%s split_io allocation failed\n", __func__);
 			error = DM_REQ_CRYPT_ERROR;
-			goto ablkcipher_req_alloc_failure;
+			goto skcipher_req_alloc_failure;
 		}
 		split_io->engine = &curr_engine_list[0];
 		init_completion(&split_io->result.completion);
@@ -406,7 +408,7 @@
 			DMERR("%s error = %d for request\n",
 				 __func__, split_io->result.err);
 			error = DM_REQ_CRYPT_ERROR;
-			goto ablkcipher_req_alloc_failure;
+			goto skcipher_req_alloc_failure;
 		}
 	} else {
 		for (i = 0; i < (engine_list_total); i++) {
@@ -416,12 +418,12 @@
 				DMERR("%s error = %d for %dst request\n",
 					 __func__, split_io[i].result.err, i);
 				error = DM_REQ_CRYPT_ERROR;
-				goto ablkcipher_req_alloc_failure;
+				goto skcipher_req_alloc_failure;
 			}
 		}
 	}
 	error = 0;
-ablkcipher_req_alloc_failure:
+skcipher_req_alloc_failure:
 
 	mempool_free(req_sg_read, req_scatterlist_pool);
 	kfree(split_io);
@@ -463,7 +465,7 @@
 		total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
 	struct req_iterator iter;
 	struct req_iterator iter1;
-	struct ablkcipher_request *req = NULL;
+	struct skcipher_request *req = NULL;
 	struct req_crypt_result result;
 	struct bio_vec bvec;
 	struct scatterlist *req_sg_in = NULL;
@@ -497,15 +499,15 @@
 
 	req_crypt_inc_pending(io);
 
-	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
-		DMERR("%s ablkcipher request allocation failed\n",
+		DMERR("%s skcipher request allocation failed\n",
 					__func__);
 		error = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 
-	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				req_crypt_cipher_complete, &result);
 
 	mutex_lock(&engine_list_mutex);
@@ -525,7 +527,7 @@
 		DMERR("%s Unknown Key ID!\n", __func__);
 		error = DM_REQ_CRYPT_ERROR;
 		mutex_unlock(&engine_list_mutex);
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 
 	engine = curr_engine_list[*engine_cursor];
@@ -538,7 +540,7 @@
 		DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
 				__func__, err);
 		mutex_unlock(&engine_list_mutex);
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 	mutex_unlock(&engine_list_mutex);
 
@@ -546,8 +548,8 @@
 
 	(dm_qcrypto_func.cipher_flag)(req,
 		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
-	crypto_ablkcipher_clear_flags(tfm, ~0);
-	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+	crypto_skcipher_clear_flags(tfm, ~0);
+	crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
 
 	req_sg_in = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
 								GFP_KERNEL);
@@ -555,7 +557,7 @@
 		DMERR("%s req_sg_in allocation failed\n",
 					__func__);
 		error = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 	memset(req_sg_in, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
 
@@ -565,7 +567,7 @@
 		DMERR("%s req_sg_out allocation failed\n",
 					__func__);
 		error = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 	memset(req_sg_out, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
 
@@ -574,7 +576,7 @@
 			(total_sg_len_req_in > MAX_SG_LIST)) {
 		DMERR("%s Request Error%d", __func__, total_sg_len_req_in);
 		error = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 
 	total_bytes_in_req = clone->__data_len;
@@ -582,7 +584,7 @@
 		DMERR("%s total_bytes_in_req > 512 MB %d",
 				__func__, total_bytes_in_req);
 		error = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 
 	rq_for_each_segment(bvec, clone, iter) {
@@ -614,16 +616,16 @@
 			(total_sg_len_req_out > MAX_SG_LIST)) {
 		DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
 		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 
 	memset(IV, 0, AES_XTS_IV_LEN);
 	memcpy(IV, &clone->__sector, sizeof(sector_t));
 
-	ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out,
+	skcipher_request_set_crypt(req, req_sg_in, req_sg_out,
 			total_bytes_in_req, (void *) IV);
 
-	rc = crypto_ablkcipher_encrypt(req);
+	rc = crypto_skcipher_encrypt(req);
 
 	switch (rc) {
 	case 0:
@@ -640,13 +642,13 @@
 			DMERR("%s error = %d encrypting the request\n",
 				 __func__, result.err);
 			error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
-			goto ablkcipher_req_alloc_failure;
+			goto skcipher_req_alloc_failure;
 		}
 		break;
 
 	default:
 		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 
 	__rq_for_each_bio(bio_src, clone) {
@@ -661,9 +663,9 @@
 	 */
 	blk_recalc_rq_segments(clone);
 
-ablkcipher_req_alloc_failure:
+skcipher_req_alloc_failure:
 	if (req)
-		ablkcipher_request_free(req);
+		skcipher_request_free(req);
 
 	if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) {
 		rq_for_each_segment(bvec, clone, iter1) {
@@ -727,7 +729,7 @@
 {
 	struct req_dm_split_req_io *io =
 			container_of(work, struct req_dm_split_req_io, work);
-	struct ablkcipher_request *req = NULL;
+	struct skcipher_request *req = NULL;
 	struct req_crypt_result result;
 	int err = 0;
 	struct crypto_engine_entry *engine = NULL;
@@ -739,14 +741,14 @@
 		/* If io is not populated this should not be called */
 		WARN_ON(1);
 	}
-	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
-		DMERR("%s ablkcipher request allocation failed\n", __func__);
+		DMERR("%s skcipher request allocation failed\n", __func__);
 		err = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 
-	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 					req_crypt_cipher_complete, &result);
 
 	engine = io->engine;
@@ -756,19 +758,19 @@
 	if (err) {
 		DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
 				__func__, err);
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 	init_completion(&result.completion);
 	(dm_qcrypto_func.cipher_flag)(req,
 		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
 
-	crypto_ablkcipher_clear_flags(tfm, ~0);
-	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+	crypto_skcipher_clear_flags(tfm, ~0);
+	crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
 
-	ablkcipher_request_set_crypt(req, io->req_split_sg_read,
+	skcipher_request_set_crypt(req, io->req_split_sg_read,
 			io->req_split_sg_read, io->size, (void *) io->IV);
 
-	err = crypto_ablkcipher_decrypt(req);
+	err = crypto_skcipher_decrypt(req);
 	switch (err) {
 	case 0:
 		break;
@@ -784,18 +786,18 @@
 			DMERR("%s error = %d encrypting the request\n",
 				 __func__, result.err);
 			err = DM_REQ_CRYPT_ERROR;
-			goto ablkcipher_req_alloc_failure;
+			goto skcipher_req_alloc_failure;
 		}
 		break;
 
 	default:
 		err = DM_REQ_CRYPT_ERROR;
-		goto ablkcipher_req_alloc_failure;
+		goto skcipher_req_alloc_failure;
 	}
 	err = 0;
-ablkcipher_req_alloc_failure:
+skcipher_req_alloc_failure:
 	if (req)
-		ablkcipher_request_free(req);
+		skcipher_request_free(req);
 
 	req_crypt_split_io_complete(&io->result, err);
 }
@@ -1029,7 +1031,7 @@
 	mutex_unlock(&engine_list_mutex);
 
 	if (tfm) {
-		crypto_free_ablkcipher(tfm);
+		crypto_free_skcipher(tfm);
 		tfm = NULL;
 	}
 }
@@ -1068,9 +1070,9 @@
 	blk_queue_max_hw_sectors(q, DM_REQ_CRYPT_QUEUE_SIZE);
 
 	/* Allocate the crypto alloc blk cipher and keep the handle */
-	tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0);
+	tfm = crypto_alloc_skcipher("qcom-xts(aes)", 0, 0);
 	if (IS_ERR(tfm)) {
-		DMERR("%s ablkcipher tfm allocation failed : error\n",
+		DMERR("%s skcipher tfm allocation failed : error\n",
 						 __func__);
 		tfm = NULL;
 		goto exit_err;