Variant ops for UFS crypto and new crypto lib

Add QTI implementation for variant ops required for inline
encryption with wrapped key support. These include UFS
crypto ops and KSM ops. Also add crypto common library to cater
to different key programming mechanisms.

Change-Id: Ica930a8a806a78d4c2d074639cbed355b895a459
Signed-off-by: Gaurav Kashyap <gaurkash@codeaurora.org>
Signed-off-by: Neeraj Soni <neersoni@codeaurora.org>
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index efedd01..f1aae8b 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -140,3 +140,11 @@
 	  Enabling this makes it possible for the kernel to use the crypto
 	  capabilities of the UFS device (if present) to perform crypto
 	  operations on data being transferred to/from the device.
+
+config SCSI_UFS_CRYPTO_QTI
+	tristate "Vendor specific UFS Crypto Engine Support"
+	depends on SCSI_UFS_CRYPTO
+	help
+	 Enable vendor-specific crypto engine support in UFS.
+	 Enabling this allows the kernel to use UFS crypto operations defined
+	 and implemented by QTI.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 1265913..e7294e6 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -12,3 +12,4 @@
 obj-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufs-qcom-debugfs.o
 obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
 ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
+ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO_QTI) += ufshcd-crypto-qti.o
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 0b4eb3e..982968f 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -30,6 +30,7 @@
 #include "ufshci.h"
 #include "ufs-qcom-debugfs.h"
 #include "ufs_quirks.h"
+#include "ufshcd-crypto-qti.h"
 
 #define MAX_PROP_SIZE		   32
 #define VDDP_REF_CLK_MIN_UV        1200000
@@ -2138,6 +2139,12 @@
 	/* restore the secure configuration */
 	ufs_qcom_update_sec_cfg(hba, true);
 
+	/*
+	 * Set the vendor-specific ops needed for ICE. If these ops are
+	 * not set, the default (specification) implementation is used.
+	 */
+	ufshcd_crypto_qti_set_vops(hba);
+
 	err = ufs_qcom_bus_register(host);
 	if (err)
 		goto out_variant_clear;
diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.c b/drivers/scsi/ufs/ufshcd-crypto-qti.c
new file mode 100644
index 0000000..f3351d0
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-crypto-qti.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <crypto/algapi.h>
+#include <linux/platform_device.h>
+#include <linux/crypto-qti-common.h>
+
+#include "ufshcd-crypto-qti.h"
+
+#define MINIMUM_DUN_SIZE 512
+#define MAXIMUM_DUN_SIZE 65536
+
+#define NUM_KEYSLOTS(hba) (hba->crypto_capabilities.config_count + 1)
+
+static struct ufs_hba_crypto_variant_ops ufshcd_crypto_qti_variant_ops = {
+	.hba_init_crypto = ufshcd_crypto_qti_init_crypto,
+	.enable = ufshcd_crypto_qti_enable,
+	.disable = ufshcd_crypto_qti_disable,
+	.resume = ufshcd_crypto_qti_resume,
+	.debug = ufshcd_crypto_qti_debug,
+};
+
+static uint8_t get_data_unit_size_mask(unsigned int data_unit_size)
+{
+	if (data_unit_size < MINIMUM_DUN_SIZE ||
+		data_unit_size > MAXIMUM_DUN_SIZE ||
+	    !is_power_of_2(data_unit_size))
+		return 0;
+
+	return data_unit_size / MINIMUM_DUN_SIZE;
+}
+
+static bool ice_cap_idx_valid(struct ufs_hba *hba,
+			      unsigned int cap_idx)
+{
+	return cap_idx < hba->crypto_capabilities.num_crypto_cap;
+}
+
+void ufshcd_crypto_qti_enable(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (!ufshcd_hba_is_crypto_supported(hba))
+		return;
+
+	err = crypto_qti_enable(hba->crypto_vops->priv);
+	if (err) {
+		pr_err("%s: Error enabling crypto, err %d\n",
+				__func__, err);
+		ufshcd_crypto_qti_disable(hba);
+		/* do not re-enable spec crypto after a failed enable */
+		return;
+	}
+	ufshcd_crypto_enable_spec(hba);
+}
+
+void ufshcd_crypto_qti_disable(struct ufs_hba *hba)
+{
+	ufshcd_crypto_disable_spec(hba);
+	crypto_qti_disable(hba->crypto_vops->priv);
+}
+
+
+static int ufshcd_crypto_qti_keyslot_program(struct keyslot_manager *ksm,
+					     const struct blk_crypto_key *key,
+					     unsigned int slot)
+{
+	struct ufs_hba *hba = keyslot_manager_private(ksm);
+	int err = 0;
+	u8 data_unit_mask;
+	int crypto_alg_id;
+
+	crypto_alg_id = ufshcd_crypto_cap_find(hba, key->crypto_mode,
+					       key->data_unit_size);
+
+	if (!ufshcd_is_crypto_enabled(hba) ||
+	    !ufshcd_keyslot_valid(hba, slot) ||
+	    !ice_cap_idx_valid(hba, crypto_alg_id))
+		return -EINVAL;
+
+	data_unit_mask = get_data_unit_size_mask(key->data_unit_size);
+
+	if (!(data_unit_mask &
+	      hba->crypto_cap_array[crypto_alg_id].sdus_mask))
+		return -EINVAL;
+
+	pm_runtime_get_sync(hba->dev);
+	err = ufshcd_hold(hba, false);
+	if (err) {
+		pr_err("%s: failed to enable clocks, err %d\n", __func__, err);
+		/* drop the PM reference taken just above */
+		pm_runtime_put_sync(hba->dev);
+		return err;
+	}
+
+	err = crypto_qti_keyslot_program(hba->crypto_vops->priv, key, slot,
+					data_unit_mask, crypto_alg_id);
+	if (err) {
+		pr_err("%s: failed with error %d\n", __func__, err);
+	}
+
+	/* balance ufshcd_hold()/pm_runtime_get_sync() on every path */
+	ufshcd_release(hba, false);
+	pm_runtime_put_sync(hba->dev);
+
+	return err;
+}
+
+static int ufshcd_crypto_qti_keyslot_evict(struct keyslot_manager *ksm,
+					   const struct blk_crypto_key *key,
+					   unsigned int slot)
+{
+	int err = 0;
+	struct ufs_hba *hba = keyslot_manager_private(ksm);
+
+	if (!ufshcd_is_crypto_enabled(hba) ||
+	    !ufshcd_keyslot_valid(hba, slot))
+		return -EINVAL;
+
+	pm_runtime_get_sync(hba->dev);
+	err = ufshcd_hold(hba, false);
+	if (err) {
+		pr_err("%s: failed to enable clocks, err %d\n", __func__, err);
+		/* drop the PM reference taken just above */
+		pm_runtime_put_sync(hba->dev);
+		return err;
+	}
+
+	err = crypto_qti_keyslot_evict(hba->crypto_vops->priv, slot);
+	if (err) {
+		pr_err("%s: failed with error %d\n",
+			__func__, err);
+	}
+
+	/* balance ufshcd_hold()/pm_runtime_get_sync() on every path */
+	ufshcd_release(hba, false);
+	pm_runtime_put_sync(hba->dev);
+
+	return err;
+}
+
+static int ufshcd_crypto_qti_derive_raw_secret(struct keyslot_manager *ksm,
+					       const u8 *wrapped_key,
+					       unsigned int wrapped_key_size,
+					       u8 *secret,
+					       unsigned int secret_size)
+{
+	return crypto_qti_derive_raw_secret(wrapped_key, wrapped_key_size,
+			secret, secret_size);
+}
+
+static const struct keyslot_mgmt_ll_ops ufshcd_crypto_qti_ksm_ops = {
+	.keyslot_program	= ufshcd_crypto_qti_keyslot_program,
+	.keyslot_evict		= ufshcd_crypto_qti_keyslot_evict,
+	.derive_raw_secret	= ufshcd_crypto_qti_derive_raw_secret,
+};
+
+static enum blk_crypto_mode_num ufshcd_blk_crypto_qti_mode_num_for_alg_dusize(
+					enum ufs_crypto_alg ufs_crypto_alg,
+					enum ufs_crypto_key_size key_size)
+{
+	/*
+	 * This is currently the only mode that UFS and blk-crypto both support.
+	 */
+	if (ufs_crypto_alg == UFS_CRYPTO_ALG_AES_XTS &&
+		key_size == UFS_CRYPTO_KEY_SIZE_256)
+		return BLK_ENCRYPTION_MODE_AES_256_XTS;
+
+	return BLK_ENCRYPTION_MODE_INVALID;
+}
+
+static int ufshcd_hba_init_crypto_qti_spec(struct ufs_hba *hba,
+				    const struct keyslot_mgmt_ll_ops *ksm_ops)
+{
+	int cap_idx = 0;
+	int err = 0;
+	unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX];
+	enum blk_crypto_mode_num blk_mode_num;
+
+	/* Default to disabling crypto */
+	hba->caps &= ~UFSHCD_CAP_CRYPTO;
+
+	if (!(hba->capabilities & MASK_CRYPTO_SUPPORT)) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * Crypto Capabilities should never be 0, because the
+	 * config_array_ptr > 04h. So we use a 0 value to indicate that
+	 * crypto init failed, and can't be enabled.
+	 */
+	hba->crypto_capabilities.reg_val =
+			  cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP));
+	hba->crypto_cfg_register =
+		 (u32)hba->crypto_capabilities.config_array_ptr * 0x100;
+	hba->crypto_cap_array =
+		 devm_kcalloc(hba->dev,
+				hba->crypto_capabilities.num_crypto_cap,
+				sizeof(hba->crypto_cap_array[0]),
+				GFP_KERNEL);
+	if (!hba->crypto_cap_array) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported));
+	/*
+	 * Store all the capabilities now so that we don't need to repeatedly
+	 * access the device each time we want to know its capabilities
+	 */
+	for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap;
+	     cap_idx++) {
+		hba->crypto_cap_array[cap_idx].reg_val =
+				cpu_to_le32(ufshcd_readl(hba,
+						REG_UFS_CRYPTOCAP +
+						cap_idx * sizeof(__le32)));
+		blk_mode_num = ufshcd_blk_crypto_qti_mode_num_for_alg_dusize(
+				hba->crypto_cap_array[cap_idx].algorithm_id,
+				hba->crypto_cap_array[cap_idx].key_size);
+		if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
+			continue;
+		crypto_modes_supported[blk_mode_num] |=
+			hba->crypto_cap_array[cap_idx].sdus_mask * 512;
+	}
+
+	hba->ksm = keyslot_manager_create(ufshcd_num_keyslots(hba), ksm_ops,
+					crypto_modes_supported, hba);
+
+	if (!hba->ksm) {
+		err = -ENOMEM;
+		goto out;
+	}
+	pr_debug("%s: keyslot manager created\n", __func__);
+
+	return 0;
+
+out:
+	/* Indicate that init failed by setting crypto_capabilities to 0 */
+	hba->crypto_capabilities.reg_val = 0;
+	return err;
+}
+
+int ufshcd_crypto_qti_init_crypto(struct ufs_hba *hba,
+				  const struct keyslot_mgmt_ll_ops *ksm_ops)
+{
+	int err = 0;
+	struct platform_device *pdev = to_platform_device(hba->dev);
+	void __iomem *mmio_base;
+	struct resource *mem_res;
+
+	mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"ufs_ice");
+	mmio_base = devm_ioremap_resource(hba->dev, mem_res);
+	if (IS_ERR(mmio_base)) {
+		pr_err("%s: Unable to get ufs_crypto mmio base\n", __func__);
+		return PTR_ERR(mmio_base);
+	}
+
+	err = ufshcd_hba_init_crypto_qti_spec(hba, &ufshcd_crypto_qti_ksm_ops);
+	if (err) {
+		pr_err("%s: Error initiating crypto capabilities, err %d\n",
+					__func__, err);
+		return err;
+	}
+
+	err = crypto_qti_init_crypto(hba->dev,
+			mmio_base, (void **)&hba->crypto_vops->priv);
+	if (err) {
+		pr_err("%s: Error initiating crypto, err %d\n",
+					__func__, err);
+	}
+	return err;
+}
+
+int ufshcd_crypto_qti_debug(struct ufs_hba *hba)
+{
+	return crypto_qti_debug(hba->crypto_vops->priv);
+}
+
+void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba)
+{
+	return ufshcd_crypto_set_vops(hba, &ufshcd_crypto_qti_variant_ops);
+}
+
+int ufshcd_crypto_qti_resume(struct ufs_hba *hba,
+			     enum ufs_pm_op pm_op)
+{
+	return crypto_qti_resume(hba->crypto_vops->priv);
+}
diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.h b/drivers/scsi/ufs/ufshcd-crypto-qti.h
new file mode 100644
index 0000000..5c1b2ae
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-crypto-qti.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UFSHCD_CRYPTO_QTI_H
+#define _UFSHCD_CRYPTO_QTI_H
+
+#include "ufshcd.h"
+#include "ufshcd-crypto.h"
+
+void ufshcd_crypto_qti_enable(struct ufs_hba *hba);
+
+void ufshcd_crypto_qti_disable(struct ufs_hba *hba);
+
+int ufshcd_crypto_qti_init_crypto(struct ufs_hba *hba,
+	const struct keyslot_mgmt_ll_ops *ksm_ops);
+
+void ufshcd_crypto_qti_setup_rq_keyslot_manager(struct ufs_hba *hba,
+					    struct request_queue *q);
+
+void ufshcd_crypto_qti_destroy_rq_keyslot_manager(struct ufs_hba *hba,
+			struct request_queue *q);
+
+int ufshcd_crypto_qti_prepare_lrbp_crypto(struct ufs_hba *hba,
+			struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp);
+
+int ufshcd_crypto_qti_complete_lrbp_crypto(struct ufs_hba *hba,
+				struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp);
+
+int ufshcd_crypto_qti_debug(struct ufs_hba *hba);
+
+int ufshcd_crypto_qti_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op);
+
+int ufshcd_crypto_qti_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op);
+
+#ifdef CONFIG_SCSI_UFS_CRYPTO_QTI
+void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba);
+#else
+static inline void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba)
+{}
+#endif /* CONFIG_SCSI_UFS_CRYPTO_QTI */
+#endif /* _UFSHCD_CRYPTO_QTI_H */
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index ab38c20..98c3c24 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -844,6 +844,24 @@
 	  bit in tcsr register if it is going to cross its own threshold.
 	  If all clients are going to cross their thresholds then Cx ipeak
 	  hw module will raise an interrupt to cDSP block to throttle cDSP fmax.
+
+config QTI_CRYPTO_COMMON
+	tristate "Enable common crypto functionality used for FBE"
+	depends on BLK_INLINE_ENCRYPTION
+	help
+	 Say 'Y' to enable the common crypto implementation to be used by
+	 different storage layers such as UFS and EMMC for file based hardware
+	 encryption. This library implements APIs to program and evict
+	 keys using Trustzone or Hardware Key Manager.
+
+config QTI_CRYPTO_TZ
+	tristate "Enable Trustzone to be used for FBE"
+	depends on QTI_CRYPTO_COMMON
+	help
+	 Say 'Y' to enable routing crypto requests to Trustzone while
+	 performing hardware based file encryption. This means keys are
+	 programmed and managed through SCM calls to TZ where ICE driver
+	 will configure keys.
 endmenu
 
 config QCOM_HYP_CORE_CTL
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 530043d..4856a43 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -100,3 +100,5 @@
 obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o
 obj-$(CONFIG_QTI_L2_REUSE) += l2_reuse.o
 obj-$(CONFIG_ICNSS2) += icnss2/
+obj-$(CONFIG_QTI_CRYPTO_COMMON) += crypto-qti-common.o
+obj-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o
diff --git a/drivers/soc/qcom/crypto-qti-common.c b/drivers/soc/qcom/crypto-qti-common.c
new file mode 100644
index 0000000..97df33a
--- /dev/null
+++ b/drivers/soc/qcom/crypto-qti-common.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/crypto-qti-common.h>
+#include "crypto-qti-ice-regs.h"
+#include "crypto-qti-platform.h"
+
+static int ice_check_fuse_setting(struct crypto_vops_qti_entry *ice_entry)
+{
+	uint32_t regval;
+	uint32_t major, minor;
+
+	major = (ice_entry->ice_hw_version & ICE_CORE_MAJOR_REV_MASK) >>
+			ICE_CORE_MAJOR_REV;
+	minor = (ice_entry->ice_hw_version & ICE_CORE_MINOR_REV_MASK) >>
+			ICE_CORE_MINOR_REV;
+
+	//Check fuse setting is not supported on ICE 3.2 onwards
+	if ((major == 0x03) && (minor >= 0x02))
+		return 0;
+	regval = ice_readl(ice_entry, ICE_REGS_FUSE_SETTING);
+	regval &= (ICE_FUSE_SETTING_MASK |
+		ICE_FORCE_HW_KEY0_SETTING_MASK |
+		ICE_FORCE_HW_KEY1_SETTING_MASK);
+
+	if (regval) {
+		pr_err("%s: error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n",
+				__func__);
+		return -EPERM;
+	}
+	return 0;
+}
+
+static int ice_check_version(struct crypto_vops_qti_entry *ice_entry)
+{
+	uint32_t version, major, minor, step;
+
+	version = ice_readl(ice_entry, ICE_REGS_VERSION);
+	major = (version & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
+	minor = (version & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
+	step = (version & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;
+
+	if (major < ICE_CORE_CURRENT_MAJOR_VERSION) {
+		/* %pK avoids leaking the raw MMIO address to the log */
+		pr_err("%s: Unknown ICE device at %pK, rev %u.%u.%u\n",
+			__func__, ice_entry->icemmio_base,
+				major, minor, step);
+		return -ENODEV;
+	}
+
+	ice_entry->ice_hw_version = version;
+	return 0;
+}
+
+int crypto_qti_init_crypto(struct device *dev, void __iomem *mmio_base,
+			   void **priv_data)
+{
+	int err = 0;
+	struct crypto_vops_qti_entry *ice_entry;
+
+	ice_entry = devm_kzalloc(dev,
+		sizeof(struct crypto_vops_qti_entry),
+		GFP_KERNEL);
+	if (!ice_entry)
+		return -ENOMEM;
+
+	ice_entry->icemmio_base = mmio_base;
+	ice_entry->flags = 0;
+
+	err = ice_check_version(ice_entry);
+	if (err) {
+		pr_err("%s: check version failed, err %d\n", __func__, err);
+		return err;
+	}
+
+	err = ice_check_fuse_setting(ice_entry);
+	if (err)
+		return err;
+
+	*priv_data = (void *)ice_entry;
+
+	return err;
+}
+
+static void ice_low_power_and_optimization_enable(
+		struct crypto_vops_qti_entry *ice_entry)
+{
+	uint32_t regval;
+
+	regval = ice_readl(ice_entry, ICE_REGS_ADVANCED_CONTROL);
+	/* Enable low power mode sequence
+	 * [0]-0,[1]-0,[2]-0,[3]-7,[4]-0,[5]-0,[6]-0,[7]-0,
+	 * Enable CONFIG_CLK_GATING, STREAM2_CLK_GATING and STREAM1_CLK_GATING
+	 */
+	regval |= 0x7000;
+	/* Optimization enable sequence
+	 */
+	regval |= 0xD807100;
+	ice_writel(ice_entry, regval, ICE_REGS_ADVANCED_CONTROL);
+	/*
+	 * Memory barrier - to ensure write completion before next transaction
+	 */
+	wmb();
+}
+
+static int ice_wait_bist_status(struct crypto_vops_qti_entry *ice_entry)
+{
+	int count;
+	uint32_t regval = 0;
+
+	for (count = 0; count < QTI_ICE_MAX_BIST_CHECK_COUNT; count++) {
+		regval = ice_readl(ice_entry, ICE_REGS_BIST_STATUS);
+		if (!(regval & ICE_BIST_STATUS_MASK))
+			break;
+		udelay(50);
+	}
+
+	/* only the BIST bits signal a timeout; ignore unrelated status bits */
+	if (regval & ICE_BIST_STATUS_MASK) {
+		pr_err("%s: wait bist status failed, reg 0x%08x\n",
+				__func__, regval);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+static void ice_enable_intr(struct crypto_vops_qti_entry *ice_entry)
+{
+	uint32_t regval;
+
+	regval = ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_MASK);
+	regval &= ~ICE_REGS_NON_SEC_IRQ_MASK;
+	ice_writel(ice_entry, regval, ICE_REGS_NON_SEC_IRQ_MASK);
+	/*
+	 * Memory barrier - to ensure write completion before next transaction
+	 */
+	wmb();
+}
+
+static void ice_disable_intr(struct crypto_vops_qti_entry *ice_entry)
+{
+	uint32_t regval;
+
+	regval = ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_MASK);
+	regval |= ICE_REGS_NON_SEC_IRQ_MASK;
+	ice_writel(ice_entry, regval, ICE_REGS_NON_SEC_IRQ_MASK);
+	/*
+	 * Memory barrier - to ensure write completion before next transaction
+	 */
+	wmb();
+}
+
+int crypto_qti_enable(void *priv_data)
+{
+	int err = 0;
+	struct crypto_vops_qti_entry *ice_entry;
+
+	ice_entry = (struct crypto_vops_qti_entry *) priv_data;
+	if (!ice_entry) {
+		pr_err("%s: vops ice data is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_low_power_and_optimization_enable(ice_entry);
+	err = ice_wait_bist_status(ice_entry);
+	if (err)
+		return err;
+	ice_enable_intr(ice_entry);
+
+	return err;
+}
+
+void crypto_qti_disable(void *priv_data)
+{
+	struct crypto_vops_qti_entry *ice_entry;
+
+	ice_entry = (struct crypto_vops_qti_entry *) priv_data;
+	if (!ice_entry) {
+		pr_err("%s: vops ice data is invalid\n", __func__);
+		return;
+	}
+
+	crypto_qti_disable_platform(ice_entry);
+	ice_disable_intr(ice_entry);
+}
+
+int crypto_qti_resume(void *priv_data)
+{
+	int err = 0;
+	struct crypto_vops_qti_entry *ice_entry;
+
+	ice_entry = (struct crypto_vops_qti_entry *) priv_data;
+	if (!ice_entry) {
+		pr_err("%s: vops ice data is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	err = ice_wait_bist_status(ice_entry);
+
+	return err;
+}
+
+static void ice_dump_test_bus(struct crypto_vops_qti_entry *ice_entry)
+{
+	uint32_t regval = 0x1;
+	uint32_t val;
+	uint8_t bus_selector;
+	uint8_t stream_selector;
+
+	pr_err("ICE TEST BUS DUMP:\n");
+
+	for (bus_selector = 0; bus_selector <= 0xF;  bus_selector++) {
+		regval = 0x1;	/* enable test bus */
+		regval |= bus_selector << 28;
+		if (bus_selector == 0xD)
+			continue;
+		ice_writel(ice_entry, regval, ICE_REGS_TEST_BUS_CONTROL);
+		/*
+		 * make sure test bus selector is written before reading
+		 * the test bus register
+		 */
+		wmb();
+		val = ice_readl(ice_entry, ICE_REGS_TEST_BUS_REG);
+		pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+			regval, val);
+	}
+
+	pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n");
+	for (stream_selector = 0; stream_selector <= 0xF; stream_selector++) {
+		regval = 0xD0000001;	/* enable stream test bus */
+		regval |= stream_selector << 16;
+		ice_writel(ice_entry, regval, ICE_REGS_TEST_BUS_CONTROL);
+		/*
+		 * make sure test bus selector is written before reading
+		 * the test bus register
+		 */
+		wmb();
+		val = ice_readl(ice_entry, ICE_REGS_TEST_BUS_REG);
+		pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+			regval, val);
+	}
+}
+
+
+int crypto_qti_debug(void *priv_data)
+{
+	struct crypto_vops_qti_entry *ice_entry;
+
+	ice_entry = (struct crypto_vops_qti_entry *) priv_data;
+	if (!ice_entry) {
+		pr_err("%s: vops ice data is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_CONTROL),
+		ice_readl(ice_entry, ICE_REGS_RESET));
+
+	pr_err("%s: ICE Version: 0x%08x | ICE FUSE:	0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_VERSION),
+		ice_readl(ice_entry, ICE_REGS_FUSE_SETTING));
+
+	pr_err("%s: ICE Param1: 0x%08x | ICE Param2:  0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_PARAMETERS_1),
+		ice_readl(ice_entry, ICE_REGS_PARAMETERS_2));
+
+	pr_err("%s: ICE Param3: 0x%08x | ICE Param4:  0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_PARAMETERS_3),
+		ice_readl(ice_entry, ICE_REGS_PARAMETERS_4));
+
+	pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS:  0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_PARAMETERS_5),
+		ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_STTS));
+
+	pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR:	0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_MASK),
+		ice_readl(ice_entry, ICE_REGS_NON_SEC_IRQ_CLR));
+
+	pr_err("%s: ICE INVALID CCFG ERR STTS: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_INVALID_CCFG_ERR_STTS));
+
+	pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts:  0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_BIST_STATUS),
+		ice_readl(ice_entry, ICE_REGS_BYPASS_STATUS));
+
+	pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP:	0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_ADVANCED_CONTROL),
+		ice_readl(ice_entry, ICE_REGS_ENDIAN_SWAP));
+
+	pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | ICE_STM1_ERR_SYND2: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM1_ERROR_SYNDROME1),
+		ice_readl(ice_entry, ICE_REGS_STREAM1_ERROR_SYNDROME2));
+
+	pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM2_ERROR_SYNDROME1),
+		ice_readl(ice_entry, ICE_REGS_STREAM2_ERROR_SYNDROME2));
+
+	pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS1),
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS2));
+
+	pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS3),
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS4));
+
+	pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS1),
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS2));
+
+	pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS3),
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS4));
+
+	pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS5_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS5_LSB));
+
+	pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS6_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS6_LSB));
+
+	pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS7_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS7_LSB));
+
+	pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS8_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS8_LSB));
+
+	pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS9_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM1_COUNTERS9_LSB));
+
+	pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS5_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS5_LSB));
+
+	pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS6_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS6_LSB));
+
+	pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS7_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS7_LSB));
+
+	pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS8_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS8_LSB));
+
+	pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n",
+		ice_entry->ice_dev_type,
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS9_MSB),
+		ice_readl(ice_entry, ICE_REGS_STREAM2_COUNTERS9_LSB));
+
+	ice_dump_test_bus(ice_entry);
+
+	return 0;
+}
+
+int crypto_qti_keyslot_program(void *priv_data,
+			       const struct blk_crypto_key *key,
+			       unsigned int slot,
+			       u8 data_unit_mask, int capid)
+{
+	int err = 0;
+	struct crypto_vops_qti_entry *ice_entry;
+
+	ice_entry = (struct crypto_vops_qti_entry *) priv_data;
+	if (!ice_entry) {
+		pr_err("%s: vops ice data is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	err = crypto_qti_program_key(ice_entry, key, slot,
+				data_unit_mask, capid);
+	if (err) {
+		pr_err("%s: program key failed with error %d\n", __func__, err);
+		err = crypto_qti_invalidate_key(ice_entry, slot);
+		if (err) {
+			pr_err("%s: invalidate key failed with error %d\n",
+				__func__, err);
+			return err;
+		}
+	}
+
+	return err;
+}
+
+int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot)
+{
+	int err = 0;
+	struct crypto_vops_qti_entry *ice_entry;
+
+	ice_entry = (struct crypto_vops_qti_entry *) priv_data;
+	if (!ice_entry) {
+		pr_err("%s: vops ice data is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	err = crypto_qti_invalidate_key(ice_entry, slot);
+	if (err) {
+		pr_err("%s: invalidate key failed with error %d\n",
+			__func__, err);
+		return err;
+	}
+
+	return err;
+}
+
+int crypto_qti_derive_raw_secret(const u8 *wrapped_key,
+				 unsigned int wrapped_key_size, u8 *secret,
+				 unsigned int secret_size)
+{
+	int err = 0;
+
+	if (wrapped_key_size <= RAW_SECRET_SIZE) {
+		pr_err("%s: Invalid wrapped_key_size: %u\n",
+				__func__, wrapped_key_size);
+		err = -EINVAL;
+		return err;
+	}
+	if (secret_size != RAW_SECRET_SIZE) {
+		pr_err("%s: Invalid secret size: %u\n", __func__, secret_size);
+		err = -EINVAL;
+		return err;
+	}
+
+	memcpy(secret, wrapped_key, secret_size);
+
+	return err;
+}
diff --git a/drivers/soc/qcom/crypto-qti-ice-regs.h b/drivers/soc/qcom/crypto-qti-ice-regs.h
new file mode 100644
index 0000000..38e5c35
--- /dev/null
+++ b/drivers/soc/qcom/crypto-qti-ice-regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_
+#define _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_
+
+#include <linux/io.h>
+
+/* Register bits for ICE version */
+#define ICE_CORE_CURRENT_MAJOR_VERSION 0x03
+
+#define ICE_CORE_STEP_REV_MASK		0xFFFF
+#define ICE_CORE_STEP_REV		0 /* bit 15-0 */
+#define ICE_CORE_MAJOR_REV_MASK		0xFF000000
+#define ICE_CORE_MAJOR_REV		24 /* bit 31-24 */
+#define ICE_CORE_MINOR_REV_MASK		0xFF0000
+#define ICE_CORE_MINOR_REV		16 /* bit 23-16 */
+
+#define ICE_BIST_STATUS_MASK		(0xF0000000)	/* bits 28-31 */
+
+#define ICE_FUSE_SETTING_MASK			0x1
+#define ICE_FORCE_HW_KEY0_SETTING_MASK		0x2
+#define ICE_FORCE_HW_KEY1_SETTING_MASK		0x4
+
+/* QTI ICE Registers from SWI */
+#define ICE_REGS_CONTROL			0x0000
+#define ICE_REGS_RESET				0x0004
+#define ICE_REGS_VERSION			0x0008
+#define ICE_REGS_FUSE_SETTING			0x0010
+#define ICE_REGS_PARAMETERS_1			0x0014
+#define ICE_REGS_PARAMETERS_2			0x0018
+#define ICE_REGS_PARAMETERS_3			0x001C
+#define ICE_REGS_PARAMETERS_4			0x0020
+#define ICE_REGS_PARAMETERS_5			0x0024
+
+
+/* QTI ICE v3.X only */
+#define ICE_GENERAL_ERR_STTS			0x0040
+#define ICE_INVALID_CCFG_ERR_STTS		0x0030
+#define ICE_GENERAL_ERR_MASK			0x0044
+
+
+/* QTI ICE v2.X only */
+#define ICE_REGS_NON_SEC_IRQ_STTS		0x0040
+#define ICE_REGS_NON_SEC_IRQ_MASK		0x0044
+
+
+#define ICE_REGS_NON_SEC_IRQ_CLR		0x0048
+#define ICE_REGS_STREAM1_ERROR_SYNDROME1	0x0050
+#define ICE_REGS_STREAM1_ERROR_SYNDROME2	0x0054
+#define ICE_REGS_STREAM2_ERROR_SYNDROME1	0x0058
+#define ICE_REGS_STREAM2_ERROR_SYNDROME2	0x005C
+#define ICE_REGS_STREAM1_BIST_ERROR_VEC		0x0060
+#define ICE_REGS_STREAM2_BIST_ERROR_VEC		0x0064
+#define ICE_REGS_STREAM1_BIST_FINISH_VEC	0x0068
+#define ICE_REGS_STREAM2_BIST_FINISH_VEC	0x006C
+#define ICE_REGS_BIST_STATUS			0x0070
+#define ICE_REGS_BYPASS_STATUS			0x0074
+#define ICE_REGS_ADVANCED_CONTROL		0x1000
+#define ICE_REGS_ENDIAN_SWAP			0x1004
+#define ICE_REGS_TEST_BUS_CONTROL		0x1010
+#define ICE_REGS_TEST_BUS_REG			0x1014
+#define ICE_REGS_STREAM1_COUNTERS1		0x1100
+#define ICE_REGS_STREAM1_COUNTERS2		0x1104
+#define ICE_REGS_STREAM1_COUNTERS3		0x1108
+#define ICE_REGS_STREAM1_COUNTERS4		0x110C
+#define ICE_REGS_STREAM1_COUNTERS5_MSB		0x1110
+#define ICE_REGS_STREAM1_COUNTERS5_LSB		0x1114
+#define ICE_REGS_STREAM1_COUNTERS6_MSB		0x1118
+#define ICE_REGS_STREAM1_COUNTERS6_LSB		0x111C
+#define ICE_REGS_STREAM1_COUNTERS7_MSB		0x1120
+#define ICE_REGS_STREAM1_COUNTERS7_LSB		0x1124
+#define ICE_REGS_STREAM1_COUNTERS8_MSB		0x1128
+#define ICE_REGS_STREAM1_COUNTERS8_LSB		0x112C
+#define ICE_REGS_STREAM1_COUNTERS9_MSB		0x1130
+#define ICE_REGS_STREAM1_COUNTERS9_LSB		0x1134
+#define ICE_REGS_STREAM2_COUNTERS1		0x1200
+#define ICE_REGS_STREAM2_COUNTERS2		0x1204
+#define ICE_REGS_STREAM2_COUNTERS3		0x1208
+#define ICE_REGS_STREAM2_COUNTERS4		0x120C
+#define ICE_REGS_STREAM2_COUNTERS5_MSB		0x1210
+#define ICE_REGS_STREAM2_COUNTERS5_LSB		0x1214
+#define ICE_REGS_STREAM2_COUNTERS6_MSB		0x1218
+#define ICE_REGS_STREAM2_COUNTERS6_LSB		0x121C
+#define ICE_REGS_STREAM2_COUNTERS7_MSB		0x1220
+#define ICE_REGS_STREAM2_COUNTERS7_LSB		0x1224
+#define ICE_REGS_STREAM2_COUNTERS8_MSB		0x1228
+#define ICE_REGS_STREAM2_COUNTERS8_LSB		0x122C
+#define ICE_REGS_STREAM2_COUNTERS9_MSB		0x1230
+#define ICE_REGS_STREAM2_COUNTERS9_LSB		0x1234
+
+#define ICE_STREAM1_PREMATURE_LBA_CHANGE	(1L << 0)
+#define ICE_STREAM2_PREMATURE_LBA_CHANGE	(1L << 1)
+#define ICE_STREAM1_NOT_EXPECTED_LBO		(1L << 2)
+#define ICE_STREAM2_NOT_EXPECTED_LBO		(1L << 3)
+#define ICE_STREAM1_NOT_EXPECTED_DUN		(1L << 4)
+#define ICE_STREAM2_NOT_EXPECTED_DUN		(1L << 5)
+#define ICE_STREAM1_NOT_EXPECTED_DUS		(1L << 6)
+#define ICE_STREAM2_NOT_EXPECTED_DUS		(1L << 7)
+#define ICE_STREAM1_NOT_EXPECTED_DBO		(1L << 8)
+#define ICE_STREAM2_NOT_EXPECTED_DBO		(1L << 9)
+#define ICE_STREAM1_NOT_EXPECTED_ENC_SEL	(1L << 10)
+#define ICE_STREAM2_NOT_EXPECTED_ENC_SEL	(1L << 11)
+#define ICE_STREAM1_NOT_EXPECTED_CONF_IDX	(1L << 12)
+#define ICE_STREAM2_NOT_EXPECTED_CONF_IDX	(1L << 13)
+#define ICE_STREAM1_NOT_EXPECTED_NEW_TRNS	(1L << 14)
+#define ICE_STREAM2_NOT_EXPECTED_NEW_TRNS	(1L << 15)
+
+#define ICE_NON_SEC_IRQ_MASK				\
+			(ICE_STREAM1_PREMATURE_LBA_CHANGE |\
+			 ICE_STREAM2_PREMATURE_LBA_CHANGE |\
+			 ICE_STREAM1_NOT_EXPECTED_LBO |\
+			 ICE_STREAM2_NOT_EXPECTED_LBO |\
+			 ICE_STREAM1_NOT_EXPECTED_DUN |\
+			 ICE_STREAM2_NOT_EXPECTED_DUN |\
+			 ICE_STREAM2_NOT_EXPECTED_DUS |\
+			 ICE_STREAM1_NOT_EXPECTED_DBO |\
+			 ICE_STREAM2_NOT_EXPECTED_DBO |\
+			 ICE_STREAM1_NOT_EXPECTED_ENC_SEL |\
+			 ICE_STREAM2_NOT_EXPECTED_ENC_SEL |\
+			 ICE_STREAM1_NOT_EXPECTED_CONF_IDX |\
+			 ICE_STREAM1_NOT_EXPECTED_NEW_TRNS |\
+			 ICE_STREAM2_NOT_EXPECTED_NEW_TRNS)
+
+/* QTI ICE registers from secure side */
+#define ICE_TEST_BUS_REG_SECURE_INTR            (1L << 28)
+#define ICE_TEST_BUS_REG_NON_SECURE_INTR        (1L << 2)
+
+#define ICE_LUT_KEYS_CRYPTOCFG_R_16		0x4040
+#define ICE_LUT_KEYS_CRYPTOCFG_R_17		0x4044
+#define ICE_LUT_KEYS_CRYPTOCFG_OFFSET		0x80
+
+
+#define ICE_LUT_KEYS_ICE_SEC_IRQ_STTS		0x6200
+#define ICE_LUT_KEYS_ICE_SEC_IRQ_MASK		0x6204
+#define ICE_LUT_KEYS_ICE_SEC_IRQ_CLR		0x6208
+
+#define ICE_STREAM1_PARTIALLY_SET_KEY_USED	(1L << 0)
+#define ICE_STREAM2_PARTIALLY_SET_KEY_USED	(1L << 1)
+#define ICE_QTIC_DBG_OPEN_EVENT			(1L << 30)
+#define ICE_KEYS_RAM_RESET_COMPLETED		(1L << 31)
+
+#define ICE_SEC_IRQ_MASK					  \
+			(ICE_STREAM1_PARTIALLY_SET_KEY_USED |\
+			 ICE_STREAM2_PARTIALLY_SET_KEY_USED |\
+			 ICE_QTIC_DBG_OPEN_EVENT |	  \
+			 ICE_KEYS_RAM_RESET_COMPLETED)
+
+#define ice_writel(ice_entry, val, reg)	\
+	writel_relaxed((val), (ice_entry)->icemmio_base + (reg))
+#define ice_readl(ice_entry, reg)	\
+	readl_relaxed((ice_entry)->icemmio_base + (reg))
+
+#endif /* _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_ */
diff --git a/drivers/soc/qcom/crypto-qti-platform.h b/drivers/soc/qcom/crypto-qti-platform.h
new file mode 100644
index 0000000..be00e50
--- /dev/null
+++ b/drivers/soc/qcom/crypto-qti-platform.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CRYPTO_QTI_PLATFORM_H
+#define _CRYPTO_QTI_PLATFORM_H
+
+#include <linux/bio-crypt-ctx.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/device.h>
+
+/*
+ * Forward declaration; the full definition lives in
+ * include/linux/crypto-qti-common.h.  Declaring the tag here keeps the
+ * struct pointers in the prototypes below at file scope instead of
+ * being silently scoped to each parameter list.
+ */
+struct crypto_vops_qti_entry;
+
+#if IS_ENABLED(CONFIG_QTI_CRYPTO_TZ)
+/* Program @key into keyslot @slot through the TZ backend. */
+int crypto_qti_program_key(struct crypto_vops_qti_entry *ice_entry,
+			   const struct blk_crypto_key *key, unsigned int slot,
+			   unsigned int data_unit_mask, int capid);
+/* Remove any key previously programmed into keyslot @slot. */
+int crypto_qti_invalidate_key(struct crypto_vops_qti_entry *ice_entry,
+			      unsigned int slot);
+#else
+static inline int crypto_qti_program_key(
+				struct crypto_vops_qti_entry *ice_entry,
+				const struct blk_crypto_key *key,
+				unsigned int slot, unsigned int data_unit_mask,
+				int capid)
+{
+	return 0;
+}
+static inline int crypto_qti_invalidate_key(
+		struct crypto_vops_qti_entry *ice_entry, unsigned int slot)
+{
+	return 0;
+}
+#endif /* CONFIG_QTI_CRYPTO_TZ */
+
+/* No platform-specific teardown is needed; stub kept for symmetry. */
+static inline void crypto_qti_disable_platform(
+				struct crypto_vops_qti_entry *ice_entry)
+{}
+
+#endif /* _CRYPTO_QTI_PLATFORM_H */
diff --git a/drivers/soc/qcom/crypto-qti-tz.c b/drivers/soc/qcom/crypto-qti-tz.c
new file mode 100644
index 0000000..1d8b27b
--- /dev/null
+++ b/drivers/soc/qcom/crypto-qti-tz.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/cacheflush.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qtee_shmbridge.h>
+#include <linux/crypto-qti-common.h>
+#include "crypto-qti-platform.h"
+#include "crypto-qti-tz.h"
+
+/*
+ * Crypto engine the TZ key calls target.  Defaults to SDCC and is
+ * switched to UFS_CE at module init when "androidboot.bootdevice="
+ * on the kernel command line names a UFS device.
+ */
+unsigned int storage_type = SDCC_CE;
+
+/*
+ * crypto_qti_program_key() - program an inline-encryption key via TZ.
+ * @ice_entry:      ICE instance (unused by the TZ backend; kept for the
+ *                  common platform-ops signature).
+ * @key:            key blob handed to the secure world.
+ * @slot:           destination ICE keyslot.
+ * @data_unit_mask: data-unit sizes supported for this key.
+ * @capid:          capability id (unused by the TZ backend).
+ *
+ * The key is staged in a shmbridge buffer that TZ reads, and the buffer
+ * is scrubbed before being returned to the bridge pool.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, or the SCM error.
+ */
+int crypto_qti_program_key(struct crypto_vops_qti_entry *ice_entry,
+			   const struct blk_crypto_key *key,
+			   unsigned int slot, unsigned int data_unit_mask,
+			   int capid)
+{
+	int err = 0;
+	uint32_t smc_id = 0;
+	char *tzbuf = NULL;
+	struct qtee_shm shm;
+	struct scm_desc desc = {0};
+
+	err = qtee_shmbridge_allocate_shm(key->size, &shm);
+	if (err)
+		return -ENOMEM;
+
+	tzbuf = shm.vaddr;
+
+	memcpy(tzbuf, key->raw, key->size);
+	/* Push the key past the caches so TZ sees the copy. */
+	dmac_flush_range(tzbuf, tzbuf + key->size);
+
+	smc_id = TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_ID;
+	desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_PARAM_ID;
+	desc.args[0] = slot;
+	desc.args[1] = shm.paddr;
+	desc.args[2] = shm.size;
+	desc.args[3] = ICE_CIPHER_MODE_XTS_256;
+	desc.args[4] = data_unit_mask;
+	desc.args[5] = storage_type;
+
+	err = scm_call2_noretry(smc_id, &desc);
+	if (err)
+		pr_err("%s:SCM call Error: 0x%x slot %d\n",
+				__func__, err, slot);
+
+	/*
+	 * Scrub the key material before the shared buffer is recycled;
+	 * memzero_explicit() cannot be optimized away, and the flush
+	 * makes sure no cached copy of the key survives either.
+	 */
+	memzero_explicit(tzbuf, key->size);
+	dmac_flush_range(tzbuf, tzbuf + key->size);
+	qtee_shmbridge_free_shm(&shm);
+
+	return err;
+}
+
+/*
+ * crypto_qti_invalidate_key() - evict the key in @slot via TZ.
+ * @ice_entry: ICE instance (unused by the TZ backend).
+ * @slot:      keyslot to invalidate.
+ *
+ * Return: 0 on success or the SCM call error code.
+ */
+int crypto_qti_invalidate_key(
+		struct crypto_vops_qti_entry *ice_entry, unsigned int slot)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_PARAM_ID;
+	desc.args[0] = slot;
+	desc.args[1] = storage_type;
+
+	ret = scm_call2_noretry(TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_ID, &desc);
+	if (ret)
+		pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
+
+	return ret;
+}
+
+/*
+ * Parse "androidboot.bootdevice=" from the kernel command line and set
+ * *s_type to UFS_CE when the boot device is UFS; *s_type is left
+ * untouched otherwise (callers keep their default).
+ *
+ * Return: 0 when the parameter is present, -EINVAL when it is missing.
+ */
+static int crypto_qti_storage_type(unsigned int *s_type)
+{
+	char boot[20] = {'\0'};
+	char *match = strnstr(saved_command_line,
+				"androidboot.bootdevice=",
+				strlen(saved_command_line));
+	if (match) {
+		/*
+		 * strlcpy() stops at the command line's terminating NUL,
+		 * unlike the previous fixed-length memcpy() which read a
+		 * full sizeof(boot) - 1 bytes and could run past the end
+		 * of saved_command_line when the parameter sits near its
+		 * tail.
+		 */
+		strlcpy(boot, match + strlen("androidboot.bootdevice="),
+			sizeof(boot));
+		if (strnstr(boot, "ufs", strlen(boot)))
+			*s_type = UFS_CE;
+
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Detect the boot storage device at module init so key programming
+ * targets the right crypto engine.
+ *
+ * NOTE(review): when "androidboot.bootdevice=" is absent this returns
+ * -EINVAL and module load fails, even though storage_type already
+ * holds a usable SDCC_CE default -- confirm failing init (rather than
+ * falling back to the default) is intended.
+ */
+static int __init crypto_qti_init(void)
+{
+	return crypto_qti_storage_type(&storage_type);
+}
+
+module_init(crypto_qti_init);
diff --git a/drivers/soc/qcom/crypto-qti-tz.h b/drivers/soc/qcom/crypto-qti-tz.h
new file mode 100644
index 0000000..bf7ac00
--- /dev/null
+++ b/drivers/soc/qcom/crypto-qti-tz.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CRYPTO_QTI_TZ_H
+#define _CRYPTO_QTI_TZ_H
+
+/* Moved inside the guard so repeated inclusion does no extra work. */
+#include <soc/qcom/qseecomi.h>
+
+/* TZ embedded-security service function ids. */
+#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE 0x5
+#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE 0x6
+
+#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \
+	TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
+	TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE)
+
+/* (slot, storage_type) */
+#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+/* (slot, key buf, key size, cipher mode, data unit mask, storage_type) */
+#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_6( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+/* Cipher modes understood by the TZ key-programming service. */
+enum {
+	ICE_CIPHER_MODE_XTS_128 = 0,
+	ICE_CIPHER_MODE_CBC_128 = 1,
+	ICE_CIPHER_MODE_XTS_256 = 3,
+	ICE_CIPHER_MODE_CBC_256 = 4
+};
+
+/* storage_type values passed to TZ in the SCM calls above. */
+#define UFS_CE 10
+#define SDCC_CE 20
+#define UFS_CARD_CE 30
+
+#endif /* _CRYPTO_QTI_TZ_H */
diff --git a/include/linux/crypto-qti-common.h b/include/linux/crypto-qti-common.h
new file mode 100644
index 0000000..ef72618
--- /dev/null
+++ b/include/linux/crypto-qti-common.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CRYPTO_QTI_COMMON_H
+#define _CRYPTO_QTI_COMMON_H
+
+#include <linux/bio-crypt-ctx.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+#define RAW_SECRET_SIZE 32
+#define QTI_ICE_MAX_BIST_CHECK_COUNT 100
+#define QTI_ICE_TYPE_NAME_LEN 8
+
+/* Per-ICE-instance state shared by the QTI crypto variant ops. */
+struct crypto_vops_qti_entry {
+	void __iomem *icemmio_base;	/* mapped ICE register base */
+	uint32_t ice_hw_version;
+	uint8_t ice_dev_type[QTI_ICE_TYPE_NAME_LEN];
+	uint32_t flags;
+};
+
+#if IS_ENABLED(CONFIG_QTI_CRYPTO_COMMON)
+// crypto-qti-common.c
+int crypto_qti_init_crypto(struct device *dev, void __iomem *mmio_base,
+			   void **priv_data);
+int crypto_qti_enable(void *priv_data);
+void crypto_qti_disable(void *priv_data);
+int crypto_qti_resume(void *priv_data);
+int crypto_qti_debug(void *priv_data);
+int crypto_qti_keyslot_program(void *priv_data,
+			       const struct blk_crypto_key *key,
+			       unsigned int slot, u8 data_unit_mask,
+			       int capid);
+int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot);
+int crypto_qti_derive_raw_secret(const u8 *wrapped_key,
+				 unsigned int wrapped_key_size, u8 *secret,
+				 unsigned int secret_size);
+
+#else
+/* No-op stubs used when the common crypto library is not built in. */
+static inline int crypto_qti_init_crypto(struct device *dev,
+					 void __iomem *mmio_base,
+					 void **priv_data)
+{
+	return 0;
+}
+static inline int crypto_qti_enable(void *priv_data)
+{
+	return 0;
+}
+/*
+ * void stub: the original returned 0 here, which is a constraint
+ * violation ("return with a value, in function returning void") and a
+ * hard error under -Werror.
+ */
+static inline void crypto_qti_disable(void *priv_data)
+{
+}
+static inline int crypto_qti_resume(void *priv_data)
+{
+	return 0;
+}
+static inline int crypto_qti_debug(void *priv_data)
+{
+	return 0;
+}
+static inline int crypto_qti_keyslot_program(void *priv_data,
+					     const struct blk_crypto_key *key,
+					     unsigned int slot,
+					     u8 data_unit_mask,
+					     int capid)
+{
+	return 0;
+}
+static inline int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot)
+{
+	return 0;
+}
+static inline int crypto_qti_derive_raw_secret(const u8 *wrapped_key,
+					       unsigned int wrapped_key_size,
+					       u8 *secret,
+					       unsigned int secret_size)
+{
+	return 0;
+}
+
+#endif /* CONFIG_QTI_CRYPTO_COMMON */
+
+#endif /* _CRYPTO_QTI_COMMON_H */