msm: ipa: update IPA driver to the tip of msm-4.4 kernel

This commit contains changes from the following original commits:
commit c114fe5555d0 ("msm: ipa: fix potential race condition ioctls")
commit c6d606203352 ("msm: ipa: WDI2.0: hotspot offload using WDI 2.0
interface")
commit 634732c1bb02 ("msm: ipa3: fix GSI ring DMA allocation
parameters")
commit 915354544a81 ("msm: ipa3: Fix timeout period of IPA resource add
dependency")
commit a1e8c020b542 ("msm: ipa3: Fix wrong parameter to xdci release
function")
commit bd09eb993a1a ("msm: ipa: fix skb recycle logic")
commit b2c8a0dbe6c8 ("msm: ipa: fix memory leak on QMI")
commit c4fddbc45db0 ("msm: ipa: mhi: add perf vote")
commit 7bd2a476f7d8 ("msm: ipa: Allocate memory on send message")
commit b544de499e55 ("msm: ipa3: increase SSR tag timeout")
commit 15dc347f5272 ("msm: ipa3: hal: change FnR prints to low")
commit b0756920f39f ("msm: ipa: unlock WLAN doorbell register access")
commit 0dfb2feb3e79 ("msm: ipa: Recycle wan buffer")
commit 1966a333f80b ("msm: ipa: always add default flt rule at the
end")
commit 9ba1638c4bdb ("msm: ipa3: change FnR prints to low")
commit 8b81dd70999c ("msm: ipa3: fix benign prints on ipa3_usb_init")
commit d51f2f62e4bc ("msm: ipa3: Add specific logs to improve debugging
capabilities")
commit 21fab3e70330 ("msm: ipa: fix mutex_lock with task->state !=
RUNNING")
commit bdb22900877b ("msm: ipa3: Do not wait for IPA DMA_TASK H/W
ACK for GSI")
commit fc5f71883677 ("msm: ipa: fix reference count in rm")
commit cd5f5121db9a ("msm: ipa: update wan pool size on napi enable")
commit 7e079d086ffe ("msm: ipa: Remove obsolete IOMMU domain
attribute")
commit 38b227b0c22f ("msm: ipa: Add guard around nat entries read")
commit 9dc2df841d20 ("msm: ipa: fix static analysis sign issue")
commit 3ce1ef220c57 ("msm: ipa3: add check on qmi-client handler")
commit 5334ff493a3f ("msm: ipa: Fix ipa ready cb return value")

All of these changes complete the initial IPA driver snapshot:
commit 9659e593c80a ("msm: ipa: initial commit of IPA driver")

Change-Id: Iad2c7633eb15393202d5ba8b3209363442410160
Signed-off-by: Gidon Studinski <gidons@codeaurora.org>
Signed-off-by: Amir Levy <alevy@codeaurora.org>
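
Note: the bulk of the ipa.c churn below applies one recurring hardening pattern to the variable-length ioctls: cache the entry count from the first copy_from_user() of the fixed header ("pre_entry"), size the kzalloc() from that cached value, and re-verify the count after the full payload copy so a concurrently writing user-space client cannot change it between the two fetches. The following is only a minimal sketch of that pattern, with hypothetical demo_cmd/demo_entry types and demo_ioctl_copy() standing in for the real ipa_ioc_* structures and handlers:

	#include <linux/slab.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct demo_entry {
		u32 value;
	};

	struct demo_cmd {
		u32 entries;
		struct demo_entry entry[];
	};

	static long demo_ioctl_copy(void __user *arg)
	{
		struct demo_cmd hdr, *param;
		u32 pre_entry;
		size_t pyld_sz;
		long retval = 0;

		/* first fetch: only the fixed-size part of the command */
		if (copy_from_user(&hdr, arg, sizeof(hdr)))
			return -EFAULT;

		/*
		 * Size the allocation from the count seen in the first fetch.
		 * A production handler should also bound pre_entry before the
		 * multiply to avoid an oversized or overflowing allocation.
		 */
		pre_entry = hdr.entries;
		pyld_sz = sizeof(hdr) + pre_entry * sizeof(struct demo_entry);

		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param)
			return -ENOMEM;

		/* second fetch: fixed part plus variable-length payload */
		if (copy_from_user(param, arg, pyld_sz)) {
			retval = -EFAULT;
			goto free_out;
		}

		/* reject if user space changed the count between the copies */
		if (unlikely(param->entries != pre_entry)) {
			retval = -EFAULT;
			goto free_out;
		}

		/* ... operate on param->entry[0 .. pre_entry - 1] here ... */

	free_out:
		kfree(param);
		return retval;
	}
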
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 8c0b9c6..f3e6ca9 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -33,7 +33,6 @@
 	compatible "qcom,ipa-smmu-wlan-cb"
 - ipa_smmu_uc: uc SMMU device
 	compatible "qcom,ipa-smmu-uc-cb"
-- qcom,smmu-disable-htw: boolean value to turn off SMMU page table caching
 - qcom,use-a2-service: determine if A2 service will be used
 - qcom,use-ipa-tethering-bridge: determine if tethering bridge will be used
 - qcom,use-ipa-bamdma-a2-bridge: determine if a2/ipa hw bridge will be used
@@ -72,6 +71,12 @@
 - qcom,tethered-flow-control:   Boolean context flag to indicate whether
                                 apps based flow control is needed for tethered
                                 call.
+- qcom,rx-polling-sleep-ms:	Receive Polling Timeout in milliseconds,
+				default is 1 millisecond.
+- qcom,ipa-polling-iteration:	IPA Polling Iteration Count, default is 40.
+- qcom,ipa-tz-unlock-reg:       Register start addresses and ranges which
+                                need to be unlocked by TZ.
+
 IPA pipe sub nodes (A2 static pipes configurations):
 
 -label: two labels are supported, a2-to-ipa and ipa-to-a2 which
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index b6dd436..06881d3 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -26,7 +26,8 @@
 #define IPA_API_DISPATCH_RETURN(api, p...) \
 	do { \
 		if (!ipa_api_ctrl) { \
-			pr_err("IPA HW is not supported on this target\n"); \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
 			ret = -EPERM; \
 		} \
 		else { \
@@ -44,7 +45,8 @@
 #define IPA_API_DISPATCH(api, p...) \
 	do { \
 		if (!ipa_api_ctrl) \
-			pr_err("IPA HW is not supported on this target\n"); \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
 		else { \
 			if (ipa_api_ctrl->api) { \
 				ipa_api_ctrl->api(p); \
@@ -59,7 +61,8 @@
 #define IPA_API_DISPATCH_RETURN_PTR(api, p...) \
 	do { \
 		if (!ipa_api_ctrl) { \
-			pr_err("IPA HW is not supported on this target\n"); \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
 			ret = NULL; \
 		} \
 		else { \
@@ -77,7 +80,8 @@
 #define IPA_API_DISPATCH_RETURN_BOOL(api, p...) \
 	do { \
 		if (!ipa_api_ctrl) { \
-			pr_err("IPA HW is not supported on this target\n"); \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
 			ret = false; \
 		} \
 		else { \
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 6addf14..a02247d 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -2435,6 +2435,7 @@
 	int res;
 	struct ipa_rm_create_params mhi_prod_params;
 	struct ipa_rm_create_params mhi_cons_params;
+	struct ipa_rm_perf_profile profile;
 
 	IPA_MHI_FUNC_ENTRY();
 
@@ -2506,6 +2507,14 @@
 		goto fail_create_rm_prod;
 	}
 
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 1000;
+	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile);
+	if (res) {
+		IPA_MHI_ERR("fail to set profile to MHI_PROD\n");
+		goto fail_perf_rm_prod;
+	}
+
 	/* Create CONS in IPA RM */
 	memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
 	mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
@@ -2518,6 +2527,14 @@
 		goto fail_create_rm_cons;
 	}
 
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 1000;
+	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
+	if (res) {
+		IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
+		goto fail_perf_rm_cons;
+	}
+
 	/* Initialize uC interface */
 	ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
 		ipa_mhi_uc_wakeup_request_cb);
@@ -2530,7 +2547,10 @@
 	IPA_MHI_FUNC_EXIT();
 	return 0;
 
+fail_perf_rm_cons:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
 fail_create_rm_cons:
+fail_perf_rm_prod:
 	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
 fail_create_rm_prod:
 	destroy_workqueue(ipa_mhi_client_ctx->wq);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 8e58320..d183083 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -2034,7 +2034,7 @@
 
 	ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", 0);
 	if (IS_ERR(ipa3_usb_ctx->dent)) {
-		IPA_USB_ERR("fail to create folder in debug_fs.\n");
+		pr_err("fail to create folder in debug_fs.\n");
 		return;
 	}
 
@@ -2043,7 +2043,7 @@
 			&ipa3_ipa_usb_ops);
 	if (!ipa3_usb_ctx->dfile_state_info ||
 		IS_ERR(ipa3_usb_ctx->dfile_state_info)) {
-		IPA_USB_ERR("failed to create file for state_info\n");
+		pr_err("failed to create file for state_info\n");
 		goto fail;
 	}
 
@@ -2136,11 +2136,11 @@
 
 connect_fail:
 	ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl,
-		dl_chan_params->teth_prot);
+		IPA3_USB_GET_TTYPE(dl_chan_params->teth_prot));
 alloc_dl_chan_fail:
 	if (connect_params->teth_prot != IPA_USB_DIAG)
 		ipa3_usb_release_xdci_channel(ul_out_params->clnt_hdl,
-			ul_chan_params->teth_prot);
+			IPA3_USB_GET_TTYPE(ul_chan_params->teth_prot));
 bad_params:
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return result;
@@ -2644,11 +2644,11 @@
 	unsigned long flags;
 	int res;
 
-	IPA_USB_DBG("entry\n");
+	pr_debug("entry\n");
 	ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
 	if (ipa3_usb_ctx == NULL) {
-		IPA_USB_ERR("failed to allocate memory\n");
-		IPA_USB_ERR(":ipa_usb init failed\n");
+		pr_err("failed to allocate memory\n");
+		pr_err(":ipa_usb init failed\n");
 		return -EFAULT;
 	}
 	memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context));
@@ -2680,19 +2680,19 @@
 
 	ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq");
 	if (!ipa3_usb_ctx->wq) {
-		IPA_USB_ERR("failed to create workqueue\n");
+		pr_err("failed to create workqueue\n");
 		res = -EFAULT;
 		goto ipa_usb_workqueue_fail;
 	}
 
 	ipa_usb_debugfs_init();
 
-	IPA_USB_INFO("exit: IPA_USB init success!\n");
+	pr_info("exit: IPA_USB init success!\n");
 
 	return 0;
 
 ipa_usb_workqueue_fail:
-	IPA_USB_ERR(":init failed (%d)\n", -res);
+	pr_err(":init failed (%d)\n", -res);
 	kfree(ipa3_usb_ctx);
 	return res;
 }
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
index e01bb7e..1431dcf 100644
--- a/drivers/platform/msm/ipa/ipa_rm.c
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -267,17 +267,18 @@
 		time = wait_for_completion_timeout(
 				&((struct ipa_rm_resource_cons *)consumer)->
 				request_consumer_in_progress,
-				HZ);
+				HZ * 5);
 		result = 0;
 		if (!time) {
 			IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.",
 					ipa_rm_resource_str(depends_on_name));
 			result = -ETIME;
-		}
-		IPA_RM_DBG("%s waited for %s GRANT %lu time.\n",
+		} else {
+			IPA_RM_DBG("%s waited for %s GRANT %lu time.\n",
 				ipa_rm_resource_str(resource_name),
 				ipa_rm_resource_str(depends_on_name),
 				time);
+		}
 	}
 	IPA_RM_DBG("EXIT with %d\n", result);
 
@@ -819,7 +820,8 @@
 	}
 	ipa_rm_resource_consumer_request_work(
 			(struct ipa_rm_resource_cons *)resource,
-			ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true);
+			ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true,
+			ipa_rm_work->inc_usage_count);
 	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
 bail:
 	kfree(ipa_rm_work);
@@ -915,7 +917,8 @@
 
 int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
 		enum ipa_rm_resource_state prev_state,
-		u32 needed_bw)
+		u32 needed_bw,
+		bool inc_usage_count)
 {
 	int result = -ENOMEM;
 	struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work),
@@ -925,6 +928,7 @@
 		work->resource_name = resource_name;
 		work->prev_state = prev_state;
 		work->needed_bw = needed_bw;
+		work->inc_usage_count = inc_usage_count;
 		result = queue_work(ipa_rm_ctx->ipa_rm_wq,
 				(struct work_struct *)work);
 	} else {
diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h
index eb86c54..1610bb1 100644
--- a/drivers/platform/msm/ipa/ipa_rm_i.h
+++ b/drivers/platform/msm/ipa/ipa_rm_i.h
@@ -118,6 +118,7 @@
 	enum ipa_rm_resource_name	resource_name;
 	enum ipa_rm_resource_state	prev_state;
 	u32				needed_bw;
+	bool				inc_usage_count;
 
 };
 
@@ -128,7 +129,8 @@
 
 int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
 		enum ipa_rm_resource_state prev_state,
-		u32 needed_bw);
+		u32 needed_bw,
+		bool inc_usage_count);
 
 int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
 		enum ipa_rm_resource_state prev_state,
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
index ec5eb3d..6657bd9 100644
--- a/drivers/platform/msm/ipa/ipa_rm_resource.c
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -116,7 +116,8 @@
 int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
 		enum ipa_rm_resource_state prev_state,
 		u32 prod_needed_bw,
-		bool notify_completion)
+		bool notify_completion,
+		bool dec_client_on_err)
 {
 	int driver_result;
 
@@ -135,7 +136,8 @@
 	} else if (driver_result != -EINPROGRESS) {
 		consumer->resource.state = prev_state;
 		consumer->resource.needed_bw -= prod_needed_bw;
-		consumer->usage_count--;
+		if (dec_client_on_err)
+			consumer->usage_count--;
 	}
 
 	return driver_result;
@@ -170,19 +172,22 @@
 				ipa_rm_resource_str(consumer->resource.name));
 			ipa_rm_wq_send_resume_cmd(consumer->resource.name,
 						prev_state,
-						prod_needed_bw);
+						prod_needed_bw,
+						inc_usage_count);
 			result = -EINPROGRESS;
 			break;
 		}
 		result = ipa_rm_resource_consumer_request_work(consumer,
 						prev_state,
 						prod_needed_bw,
-						false);
+						false,
+						inc_usage_count);
 		break;
 	case IPA_RM_GRANTED:
 		if (wake_client) {
 			result = ipa_rm_resource_consumer_request_work(
-				consumer, prev_state, prod_needed_bw, false);
+				consumer, prev_state, prod_needed_bw, false,
+				inc_usage_count);
 			break;
 		}
 		ipa_rm_perf_profile_change(consumer->resource.name);
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h
index 5c3a019..da149c5 100644
--- a/drivers/platform/msm/ipa/ipa_rm_resource.h
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.h
@@ -155,7 +155,8 @@
 int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
 		enum ipa_rm_resource_state prev_state,
 		u32 needed_bw,
-		bool notify_completion);
+		bool notify_completion,
+		bool dec_client_on_err);
 
 int ipa_rm_resource_consumer_release_work(
 		struct ipa_rm_resource_cons *consumer,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 037231c..84a5180 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -207,7 +207,6 @@
 static struct {
 	bool present;
 	bool arm_smmu;
-	bool disable_htw;
 	bool fast_map;
 	bool s1_bypass;
 	u32 ipa_base;
@@ -575,6 +574,7 @@
 	struct ipa_ioc_v4_nat_del nat_del;
 	struct ipa_ioc_rm_dependency rm_depend;
 	size_t sz;
+	int pre_entry;
 
 	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
 
@@ -623,11 +623,11 @@
 			retval = -EFAULT;
 			break;
 		}
-
+		pre_entry =
+			((struct ipa_ioc_nat_dma_cmd *)header)->entries;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_nat_dma_cmd) +
-		   ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
-		   sizeof(struct ipa_ioc_nat_dma_one);
+		   pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -638,7 +638,15 @@
 			retval = -EFAULT;
 			break;
 		}
-
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_nat_dma_cmd *)param)->entries,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
 			retval = -EFAULT;
 			break;
@@ -663,10 +671,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr *)header)->num_hdrs;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_hdr) +
-		   ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
-		   sizeof(struct ipa_hdr_add);
+		   pre_entry * sizeof(struct ipa_hdr_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -676,6 +685,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_add_hdr((struct ipa_ioc_add_hdr *)param)) {
 			retval = -EFAULT;
 			break;
@@ -692,10 +710,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr *)header)->num_hdls;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_del_hdr) +
-		   ((struct ipa_ioc_del_hdr *)header)->num_hdls *
-		   sizeof(struct ipa_hdr_del);
+		   pre_entry * sizeof(struct ipa_hdr_del);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -705,6 +724,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_del_hdr((struct ipa_ioc_del_hdr *)param)) {
 			retval = -EFAULT;
 			break;
@@ -721,10 +749,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule *)header)->num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_rt_rule) +
-		   ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
-		   sizeof(struct ipa_rt_rule_add);
+		   pre_entry * sizeof(struct ipa_rt_rule_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -734,6 +763,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -750,10 +789,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_mdfy_rt_rule) +
-		   ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules *
-		   sizeof(struct ipa_rt_rule_mdfy);
+		   pre_entry * sizeof(struct ipa_rt_rule_mdfy);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -763,6 +803,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -779,10 +829,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_del_rt_rule) +
-		   ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
-		   sizeof(struct ipa_rt_rule_del);
+		   pre_entry * sizeof(struct ipa_rt_rule_del);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -792,6 +843,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -808,10 +868,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_flt_rule *)header)->num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_flt_rule) +
-		   ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
-		   sizeof(struct ipa_flt_rule_add);
+		   pre_entry * sizeof(struct ipa_flt_rule_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -821,6 +882,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -837,10 +908,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_del_flt_rule) +
-		   ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
-		   sizeof(struct ipa_flt_rule_del);
+		   pre_entry * sizeof(struct ipa_flt_rule_del);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -850,6 +922,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_flt_rule *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -866,10 +948,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_mdfy_flt_rule) +
-		   ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules *
-		   sizeof(struct ipa_flt_rule_mdfy);
+		   pre_entry * sizeof(struct ipa_flt_rule_mdfy);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -879,6 +962,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -992,9 +1085,10 @@
 			retval = -EFAULT;
 			break;
 		}
-
-		pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
-				header)->num_tx_props *
+		pre_entry =
+			((struct ipa_ioc_query_intf_tx_props *)
+			header)->num_tx_props;
+		pyld_sz = sz + pre_entry *
 			sizeof(struct ipa_ioc_tx_intf_prop);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
@@ -1005,6 +1099,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
+			param)->num_tx_props
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_tx_props *)
+				param)->num_tx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa_query_intf_tx_props(
 				(struct ipa_ioc_query_intf_tx_props *)param)) {
 			retval = -1;
@@ -1027,9 +1131,10 @@
 			retval = -EFAULT;
 			break;
 		}
-
-		pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
-				header)->num_rx_props *
+		pre_entry =
+			((struct ipa_ioc_query_intf_rx_props *)
+			header)->num_rx_props;
+		pyld_sz = sz + pre_entry *
 			sizeof(struct ipa_ioc_rx_intf_prop);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
@@ -1040,6 +1145,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
+			param)->num_rx_props != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_rx_props *)
+				param)->num_rx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa_query_intf_rx_props(
 				(struct ipa_ioc_query_intf_rx_props *)param)) {
 			retval = -1;
@@ -1062,9 +1176,10 @@
 			retval = -EFAULT;
 			break;
 		}
-
-		pyld_sz = sz + ((struct ipa_ioc_query_intf_ext_props *)
-				header)->num_ext_props *
+		pre_entry =
+			((struct ipa_ioc_query_intf_ext_props *)
+			header)->num_ext_props;
+		pyld_sz = sz + pre_entry *
 			sizeof(struct ipa_ioc_ext_intf_prop);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
@@ -1075,6 +1190,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
+			param)->num_ext_props != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_ext_props *)
+				param)->num_ext_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa_query_intf_ext_props(
 				(struct ipa_ioc_query_intf_ext_props *)param)) {
 			retval = -1;
@@ -1091,8 +1215,10 @@
 			retval = -EFAULT;
 			break;
 		}
-		pyld_sz = sizeof(struct ipa_msg_meta) +
+		pre_entry =
 		   ((struct ipa_msg_meta *)header)->msg_len;
+		pyld_sz = sizeof(struct ipa_msg_meta) +
+		   pre_entry;
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -1102,6 +1228,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_msg_meta *)param)->msg_len,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa_pull_msg((struct ipa_msg_meta *)param,
 				 (char *)param + sizeof(struct ipa_msg_meta),
 				 ((struct ipa_msg_meta *)param)->msg_len) !=
@@ -1218,10 +1353,12 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr_proc_ctx *)
+			header)->num_proc_ctxs;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
-		   ((struct ipa_ioc_add_hdr_proc_ctx *)header)->num_proc_ctxs *
-		   sizeof(struct ipa_hdr_proc_ctx_add);
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -1231,6 +1368,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
+			param)->num_proc_ctxs != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr_proc_ctx *)
+				param)->num_proc_ctxs, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_add_hdr_proc_ctx(
 			(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
 			retval = -EFAULT;
@@ -1247,10 +1393,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
-		   ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls *
-		   sizeof(struct ipa_hdr_proc_ctx_del);
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -1260,6 +1407,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
+			param)->num_hdls != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr_proc_ctx *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa2_del_hdr_proc_ctx(
 			(struct ipa_ioc_del_hdr_proc_ctx *)param)) {
 			retval = -EFAULT;
@@ -3612,6 +3769,7 @@
 	ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
 	ipa_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
 	ipa_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
+	ipa_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
 	ipa_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
 	ipa_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
 	ipa_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
@@ -4150,12 +4308,10 @@
 	ipa_drv_res->ipa_hw_mode = 0;
 	ipa_drv_res->ipa_bam_remote_mode = false;
 	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
+	ipa_drv_res->ipa_wdi2 = false;
 	ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
 	ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
 
-	smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
-			"qcom,smmu-disable-htw");
-
 	/* Get IPA HW Version */
 	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
 					&ipa_drv_res->ipa_hw_type);
@@ -4216,6 +4372,13 @@
 			ipa_drv_res->modem_cfg_emb_pipe_flt
 			? "True" : "False");
 
+	ipa_drv_res->ipa_wdi2 =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,ipa-wdi2");
+	IPADBG(": WDI-2.0 = %s\n",
+		ipa_drv_res->ipa_wdi2
+		? "True" : "False");
+
 	ipa_drv_res->skip_uc_pipe_reset =
 		of_property_read_bool(pdev->dev.of_node,
 		"qcom,skip-uc-pipe-reset");
@@ -4335,7 +4498,6 @@
 static int ipa_smmu_wlan_cb_probe(struct device *dev)
 {
 	struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
-	int disable_htw = 1;
 	int atomic_ctx = 1;
 	int fast = 1;
 	int bypass = 1;
@@ -4352,17 +4514,6 @@
 	}
 	cb->valid = true;
 
-	if (smmu_info.disable_htw) {
-		ret = iommu_domain_set_attr(cb->iommu,
-			DOMAIN_ATTR_COHERENT_HTW_DISABLE,
-			&disable_htw);
-		if (ret) {
-			IPAERR("couldn't disable coherent HTW\n");
-			cb->valid = false;
-			return -EIO;
-		}
-	}
-
 	if (smmu_info.s1_bypass) {
 		if (iommu_domain_set_attr(cb->iommu,
 			DOMAIN_ATTR_S1_BYPASS,
@@ -4422,7 +4573,6 @@
 static int ipa_smmu_uc_cb_probe(struct device *dev)
 {
 	struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
-	int disable_htw = 1;
 	int atomic_ctx = 1;
 	int ret;
 	int fast = 1;
@@ -4461,18 +4611,6 @@
 	IPADBG("SMMU mapping created\n");
 	cb->valid = true;
 
-	IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
-	if (smmu_info.disable_htw) {
-		if (iommu_domain_set_attr(cb->mapping->domain,
-				DOMAIN_ATTR_COHERENT_HTW_DISABLE,
-				 &disable_htw)) {
-			IPAERR("couldn't disable coherent HTW\n");
-			arm_iommu_release_mapping(cb->mapping);
-			cb->valid = false;
-			return -EIO;
-		}
-	}
-
 	IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
 	if (smmu_info.s1_bypass) {
 		if (iommu_domain_set_attr(cb->mapping->domain,
@@ -4527,7 +4665,6 @@
 {
 	struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
 	int result;
-	int disable_htw = 1;
 	int atomic_ctx = 1;
 	int fast = 1;
 	int bypass = 1;
@@ -4564,18 +4701,6 @@
 	IPADBG("SMMU mapping created\n");
 	cb->valid = true;
 
-	if (smmu_info.disable_htw) {
-		if (iommu_domain_set_attr(cb->mapping->domain,
-				DOMAIN_ATTR_COHERENT_HTW_DISABLE,
-				 &disable_htw)) {
-			IPAERR("couldn't disable coherent HTW\n");
-			arm_iommu_release_mapping(cb->mapping);
-			cb->valid = false;
-			return -EIO;
-		}
-		IPADBG("SMMU disable HTW\n");
-	}
-
 	if (smmu_info.s1_bypass) {
 		if (iommu_domain_set_attr(cb->mapping->domain,
 			DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index a8266c8..c9e20d3 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -1420,6 +1420,7 @@
 	u16 enable, tbl_entry, flag;
 	u32 no_entrys = 0;
 
+	mutex_lock(&ipa_ctx->nat_mem.lock);
 	value = ipa_ctx->nat_mem.public_ip_addr;
 	pr_err(
 				"Table IP Address:%d.%d.%d.%d\n",
@@ -1573,6 +1574,7 @@
 		}
 	}
 	pr_err("Current No. Nat Entries: %d\n", no_entrys);
+	mutex_unlock(&ipa_ctx->nat_mem.lock);
 
 	return 0;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index bc67e84..179a31b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -2404,7 +2404,7 @@
 		if (skb->len < IPA_PKT_STATUS_SIZE) {
 			WARN_ON(sys->prev_skb != NULL);
 			IPADBG("status straddles buffer\n");
-			sys->prev_skb = skb;
+			sys->prev_skb = skb_copy(skb, GFP_KERNEL);
 			sys->len_partial = skb->len;
 			return rc;
 		}
@@ -2489,7 +2489,7 @@
 					!status->exception) {
 				WARN_ON(sys->prev_skb != NULL);
 				IPADBG("Ins header in next buffer\n");
-				sys->prev_skb = skb;
+				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
 				sys->len_partial =	 skb->len;
 				return rc;
 			}
@@ -3160,23 +3160,23 @@
 		} else if (in->client ==
 					  IPA_CLIENT_APPS_WAN_CONS) {
 			sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
-			if (in->napi_enabled) {
+			sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size;
+			if (nr_cpu_ids > 1) {
 				sys->repl_hdlr =
-				   ipa_replenish_rx_cache_recycle;
-				sys->rx_pool_sz =
-				   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+				   ipa_fast_replenish_rx_cache;
+				sys->repl_trig_thresh =
+				   sys->rx_pool_sz / 8;
 			} else {
-				if (nr_cpu_ids > 1) {
-					sys->repl_hdlr =
-					   ipa_fast_replenish_rx_cache;
-					sys->repl_trig_thresh =
-					   sys->rx_pool_sz / 8;
-				} else {
-					sys->repl_hdlr =
-					   ipa_replenish_rx_cache;
-				}
+				sys->repl_hdlr =
+				   ipa_replenish_rx_cache;
+			}
+			if (in->napi_enabled) {
 				sys->rx_pool_sz =
-				   ipa_ctx->wan_rx_ring_size;
+					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+				if (in->recycle_enabled) {
+					sys->repl_hdlr =
+					   ipa_replenish_rx_cache_recycle;
+				}
 			}
 			sys->ep->wakelock_client =
 			   IPA_WAKELOCK_REF_CLIENT_WAN_RX;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 8b69298..350b5a1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -41,8 +41,6 @@
 #define MTU_BYTE 1500
 
 #define IPA_MAX_NUM_PIPES 0x14
-#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
 #define IPA_SYS_DESC_FIFO_SZ 0x2000
 #define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
 #define IPA_LAN_RX_HEADER_LENGTH (2)
@@ -50,6 +48,11 @@
 #define IPA_DL_CHECKSUM_LENGTH (8)
 #define IPA_NUM_DESC_PER_SW_TX (2)
 #define IPA_GENERIC_RX_POOL_SZ 1000
+#define IPA_UC_FINISH_MAX 6
+#define IPA_UC_WAIT_MIN_SLEEP 1000
+#define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
+#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
@@ -890,6 +893,14 @@
 	u32 uc_status;
 	bool uc_zip_error;
 	u32 uc_error_type;
+	phys_addr_t rdy_ring_base_pa;
+	phys_addr_t rdy_ring_rp_pa;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_comp_ring_base_pa;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
 };
 
 /**
@@ -994,6 +1005,7 @@
  * @use_ipa_teth_bridge: use tethering bridge driver
  * @ipa_bam_remote_mode: ipa bam is in remote mode
  * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
+ * @ipa_wdi2: using wdi-2.0
  * @ipa_bus_hdl: msm driver handle for the data path bus
  * @ctrl: holds the core specific operations based on
  *  core version (vtable like)
@@ -1081,6 +1093,7 @@
 	bool use_ipa_teth_bridge;
 	bool ipa_bam_remote_mode;
 	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
 	/* featurize if memory footprint becomes a concern */
 	struct ipa_stats stats;
 	void *smem_pipe_mem;
@@ -1171,6 +1184,7 @@
 	u32 ee;
 	bool ipa_bam_remote_mode;
 	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
 	u32 wan_rx_ring_size;
 	u32 lan_rx_ring_size;
 	bool skip_uc_pipe_reset;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index 8ec83eb..2a68970 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -347,6 +347,11 @@
 	return result;
 }
 
+static void ipa2_send_msg_free(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
 /**
  * ipa2_send_msg() - Send "message" from kernel client to IPA driver
  * @meta: [in] message meta-data
@@ -366,6 +371,7 @@
 		  ipa_msg_free_fn callback)
 {
 	struct ipa_push_msg *msg;
+	void *data = NULL;
 
 	if (unlikely(!ipa_ctx)) {
 		IPAERR("IPA driver was not initialized\n");
@@ -391,8 +397,17 @@
 	}
 
 	msg->meta = *meta;
-	msg->buff = buff;
-	msg->callback = callback;
+	if (meta->msg_len > 0 && buff) {
+		data = kmalloc(meta->msg_len, GFP_KERNEL);
+		if (data == NULL) {
+			IPAERR("fail to alloc data container\n");
+			kfree(msg);
+			return -ENOMEM;
+		}
+		memcpy(data, buff, meta->msg_len);
+		msg->buff = data;
+		msg->callback = ipa2_send_msg_free;
+	}
 
 	mutex_lock(&ipa_ctx->msg_lock);
 	list_add_tail(&msg->link, &ipa_ctx->msg_list);
@@ -400,6 +415,8 @@
 	IPA_STATS_INC_CNT(ipa_ctx->stats.msg_w[meta->msg_type]);
 
 	wake_up(&ipa_ctx->msg_waitq);
+	if (buff)
+		callback(buff, meta->msg_len, meta->msg_type);
 
 	return 0;
 }
@@ -505,10 +522,9 @@
 	start = buf;
 
 	while (1) {
-		prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE);
-
 		mutex_lock(&ipa_ctx->msg_lock);
 		locked = 1;
+		prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE);
 		if (!list_empty(&ipa_ctx->msg_list)) {
 			msg = list_first_entry(&ipa_ctx->msg_list,
 					struct ipa_push_msg, link);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index 68cd7d5..7291a44 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -493,6 +493,8 @@
 	resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei;
 
 	pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
 			&resp_desc, &resp, sizeof(resp),
 			QMI_SEND_REQ_TIMEOUT_MS);
@@ -538,7 +540,8 @@
 		QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
 	resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
 	resp_desc.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei;
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
 			req,
 			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
@@ -574,7 +577,8 @@
 	resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
 	resp_desc.ei_array =
 		ipa_enable_force_clear_datapath_resp_msg_data_v01_ei;
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt,
 			&req_desc,
 			req,
@@ -618,7 +622,8 @@
 	resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
 	resp_desc.ei_array =
 		ipa_disable_force_clear_datapath_resp_msg_data_v01_ei;
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt,
 			&req_desc,
 			req,
@@ -688,7 +693,8 @@
 		QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
 	resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
 	resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei;
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt,
 			&req_desc,
 			req,
@@ -1088,7 +1094,8 @@
 	resp_desc.ei_array = ipa_get_data_stats_resp_msg_data_v01_ei;
 
 	IPAWANDBG("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
 			sizeof(struct ipa_get_data_stats_req_msg_v01),
 			&resp_desc, resp,
@@ -1117,7 +1124,8 @@
 	resp_desc.ei_array = ipa_get_apn_data_stats_resp_msg_data_v01_ei;
 
 	IPAWANDBG("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
 			sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
 			&resp_desc, resp,
@@ -1149,7 +1157,8 @@
 	resp_desc.ei_array = ipa_set_data_usage_quota_resp_msg_data_v01_ei;
 
 	IPAWANDBG("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
 			sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
 			&resp_desc, &resp, sizeof(resp),
@@ -1183,7 +1192,8 @@
 	resp_desc.ei_array = ipa_stop_data_usage_quota_resp_msg_data_v01_ei;
 
 	IPAWANDBG("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
 		&resp_desc, &resp, sizeof(resp),
 		QMI_SEND_STATS_REQ_TIMEOUT_MS);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
index 08ed47f..d14f8da 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -150,10 +150,16 @@
 {
 	int ret;
 
+	if (!ipa_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return -ENXIO;
+	}
+
 	ret = ipa2_uc_state_check();
 	if (ret) {
 		ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
 		ipa_ctx->uc_ntn_ctx.priv = user_data;
+		return 0;
 	}
 
 	return -EEXIST;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index abeb359..aca6d05 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -25,12 +25,14 @@
 #define IPA_WDI_RESUMED BIT(2)
 #define IPA_UC_POLL_SLEEP_USEC 100
 
-#define IPA_WDI_RX_RING_RES	0
-#define IPA_WDI_RX_RING_RP_RES	1
-#define IPA_WDI_TX_RING_RES	2
-#define IPA_WDI_CE_RING_RES	3
-#define IPA_WDI_CE_DB_RES	4
-#define IPA_WDI_MAX_RES		5
+#define IPA_WDI_RX_RING_RES 0
+#define IPA_WDI_RX_RING_RP_RES 1
+#define IPA_WDI_RX_COMP_RING_RES 2
+#define IPA_WDI_RX_COMP_RING_WP_RES 3
+#define IPA_WDI_TX_RING_RES 4
+#define IPA_WDI_CE_RING_RES 5
+#define IPA_WDI_CE_DB_RES 6
+#define IPA_WDI_MAX_RES 7
 
 struct ipa_wdi_res {
 	struct ipa_wdi_buffer_info *res;
@@ -233,6 +235,21 @@
 	u8  reserved;
 } __packed;
 
+struct IpaHwWdi2TxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u32 comp_ring_base_pa_hi;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u32 ce_ring_base_pa_hi;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u32 ce_ring_doorbell_pa_hi;
+	u16 num_tx_buffers;
+	u8  ipa_pipe_number;
+	u8  reserved;
+} __packed;
 /**
  * struct IpaHwWdiRxSetUpCmdData_t -  Structure holding the parameters for
  * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
@@ -254,6 +271,19 @@
 	u8  ipa_pipe_number;
 } __packed;
 
+struct IpaHwWdi2RxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_base_pa_hi;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u32 rx_ring_rp_pa_hi;
+	u32 rx_comp_ring_base_pa;
+	u32 rx_comp_ring_base_pa_hi;
+	u32 rx_comp_ring_size;
+	u32 rx_comp_ring_wp_pa;
+	u32 rx_comp_ring_wp_pa_hi;
+	u8  ipa_pipe_number;
+} __packed;
 /**
  * union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for
  * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
@@ -561,7 +591,10 @@
 		end = IPA_WDI_CE_DB_RES;
 	} else {
 		start = IPA_WDI_RX_RING_RES;
-		end = IPA_WDI_RX_RING_RP_RES;
+		if (ipa_ctx->ipa_wdi2)
+			end = IPA_WDI_RX_COMP_RING_WP_RES;
+		else
+			end = IPA_WDI_RX_RING_RP_RES;
 	}
 
 	for (i = start; i <= end; i++) {
@@ -709,6 +742,8 @@
 	struct ipa_mem_buffer cmd;
 	struct IpaHwWdiTxSetUpCmdData_t *tx;
 	struct IpaHwWdiRxSetUpCmdData_t *rx;
+	struct IpaHwWdi2TxSetUpCmdData_t *tx_2;
+	struct IpaHwWdi2RxSetUpCmdData_t *rx_2;
 	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
 	unsigned long va;
 	phys_addr_t pa;
@@ -761,7 +796,10 @@
 
 	IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
 	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
-		cmd.size = sizeof(*tx);
+		if (ipa_ctx->ipa_wdi2)
+			cmd.size = sizeof(*tx_2);
+		else
+			cmd.size = sizeof(*tx);
 		IPADBG("comp_ring_base_pa=0x%pa\n",
 				&in->u.dl.comp_ring_base_pa);
 		IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
@@ -771,10 +809,58 @@
 				&in->u.dl.ce_door_bell_pa);
 		IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
 	} else {
-		cmd.size = sizeof(*rx);
-		IPADBG("rx_ring_base_pa=0x%pa\n", &in->u.ul.rdy_ring_base_pa);
-		IPADBG("rx_ring_size=%d\n", in->u.ul.rdy_ring_size);
-		IPADBG("rx_ring_rp_pa=0x%pa\n", &in->u.ul.rdy_ring_rp_pa);
+		if (ipa_ctx->ipa_wdi2) {
+			/* WDI2.0 feature */
+			cmd.size = sizeof(*rx_2);
+			IPADBG("rdy_ring_rp value =%d\n",
+				*in->u.ul.rdy_ring_rp_va);
+			IPADBG("rx_comp_ring_wp value=%d\n",
+				*in->u.ul.rdy_comp_ring_wp_va);
+			ipa_ctx->uc_ctx.rdy_ring_rp_va =
+				in->u.ul.rdy_ring_rp_va;
+			ipa_ctx->uc_ctx.rdy_comp_ring_wp_va =
+				in->u.ul.rdy_comp_ring_wp_va;
+		} else {
+			cmd.size = sizeof(*rx);
+		}
+		IPADBG("rx_ring_base_pa=0x%pa\n",
+			&in->u.ul.rdy_ring_base_pa);
+		IPADBG("rx_ring_size=%d\n",
+			in->u.ul.rdy_ring_size);
+		IPADBG("rx_ring_rp_pa=0x%pa\n",
+			&in->u.ul.rdy_ring_rp_pa);
+
+		IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+			&in->u.ul.rdy_comp_ring_base_pa);
+		IPADBG("rx_comp_ring_size=%d\n",
+			in->u.ul.rdy_comp_ring_size);
+		IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+			&in->u.ul.rdy_comp_ring_wp_pa);
+
+		ipa_ctx->uc_ctx.rdy_ring_base_pa =
+			in->u.ul.rdy_ring_base_pa;
+		ipa_ctx->uc_ctx.rdy_ring_rp_pa =
+			in->u.ul.rdy_ring_rp_pa;
+		ipa_ctx->uc_ctx.rdy_ring_size =
+			in->u.ul.rdy_ring_size;
+		ipa_ctx->uc_ctx.rdy_comp_ring_base_pa =
+			in->u.ul.rdy_comp_ring_base_pa;
+		ipa_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+			in->u.ul.rdy_comp_ring_wp_pa;
+		ipa_ctx->uc_ctx.rdy_comp_ring_size =
+			in->u.ul.rdy_comp_ring_size;
+
+		/* check if the VA is empty */
+		if (!in->u.ul.rdy_ring_rp_va && ipa_ctx->ipa_wdi2) {
+			IPAERR("rdy_ring_rp_va is empty, wdi2.0(%d)\n",
+				ipa_ctx->ipa_wdi2);
+				goto dma_alloc_fail;
+		}
+		if (!in->u.ul.rdy_comp_ring_wp_va && ipa_ctx->ipa_wdi2) {
+			IPAERR("comp_ring_wp_va is empty, wdi2.0(%d)\n",
+				ipa_ctx->ipa_wdi2);
+				goto dma_alloc_fail;
+		}
 	}
 
 	cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
@@ -786,64 +872,140 @@
 	}
 
 	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
-		tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+		if (ipa_ctx->ipa_wdi2) {
+			tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base;
 
-		len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
-			in->u.dl.comp_ring_size;
-		IPADBG("TX ring smmu_en=%d ring_size=%d %d\n", in->smmu_enabled,
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+			IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
 				in->u.dl_smmu.comp_ring_size,
 				in->u.dl.comp_ring_size);
-		if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
-				in->smmu_enabled,
-				in->u.dl.comp_ring_base_pa,
-				&in->u.dl_smmu.comp_ring,
-				len,
-				false,
-				&va)) {
-			IPAERR("fail to create uc mapping TX ring.\n");
-			result = -ENOMEM;
-			goto uc_timeout;
-		}
-		tx->comp_ring_base_pa = va;
-		tx->comp_ring_size = len;
-
-		len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
-			in->u.dl.ce_ring_size;
-		IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
-				in->smmu_enabled,
-				in->u.dl_smmu.ce_ring_size,
-				in->u.dl.ce_ring_size);
-		if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
 					in->smmu_enabled,
-					in->u.dl.ce_ring_base_pa,
-					&in->u.dl_smmu.ce_ring,
+					in->u.dl.comp_ring_base_pa,
+					&in->u.dl_smmu.comp_ring,
 					len,
 					false,
 					&va)) {
-			IPAERR("fail to create uc mapping CE ring.\n");
-			result = -ENOMEM;
-			goto uc_timeout;
-		}
-		tx->ce_ring_base_pa = va;
-		tx->ce_ring_size = len;
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->comp_ring_size = len;
+			IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->comp_ring_base_pa_hi,
+					tx_2->comp_ring_base_pa);
 
-		pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
-			in->u.dl.ce_door_bell_pa;
-		if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n",
 					in->smmu_enabled,
-					pa,
-					NULL,
-					4,
-					true,
-					&va)) {
-			IPAERR("fail to create uc mapping CE DB.\n");
-			result = -ENOMEM;
-			goto uc_timeout;
-		}
-		tx->ce_ring_doorbell_pa = va;
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->ce_ring_size = len;
+			IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_base_pa_hi,
+					tx_2->ce_ring_base_pa);
 
-		tx->num_tx_buffers = in->u.dl.num_tx_buffers;
-		tx->ipa_pipe_number = ipa_ep_idx;
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_doorbell_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_doorbell_pa_hi,
+					tx_2->ce_ring_doorbell_pa);
+
+			tx_2->num_tx_buffers = in->u.dl.num_tx_buffers;
+			tx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+				IPADBG("TX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.comp_ring_size,
+					in->u.dl.comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.comp_ring_base_pa,
+						&in->u.dl_smmu.comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->comp_ring_base_pa = va;
+			tx->comp_ring_size = len;
+
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->ce_ring_base_pa = va;
+			tx->ce_ring_size = len;
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->ce_ring_doorbell_pa = va;
+			tx->num_tx_buffers = in->u.dl.num_tx_buffers;
+			tx->ipa_pipe_number = ipa_ep_idx;
+		}
+
 		if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
 			out->uc_door_bell_pa =
 				ipa_ctx->ipa_wrapper_base +
@@ -860,43 +1022,141 @@
 				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
 		}
 	} else {
-		rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
+		if (ipa_ctx->ipa_wdi2) {
+			rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base;
 
-		len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
-			in->u.ul.rdy_ring_size;
-		IPADBG("RX ring smmu_en=%d ring_size=%d %d\n", in->smmu_enabled,
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
 				in->u.ul_smmu.rdy_ring_size,
 				in->u.ul.rdy_ring_size);
-		if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
-					in->smmu_enabled,
-					in->u.ul.rdy_ring_base_pa,
-					&in->u.ul_smmu.rdy_ring,
-					len,
-					false,
-					&va)) {
-			IPAERR("fail to create uc mapping RX ring.\n");
-			result = -ENOMEM;
-			goto uc_timeout;
-		}
-		rx->rx_ring_base_pa = va;
-		rx->rx_ring_size = len;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_ring_size = len;
+			IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_ring_base_pa_hi,
+					rx_2->rx_ring_base_pa);
 
-		pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
-			in->u.ul.rdy_ring_rp_pa;
-		if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_rp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n",
+					rx_2->rx_ring_rp_pa_hi,
+					rx_2->rx_ring_rp_pa);
+			len = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_size :
+				in->u.ul.rdy_comp_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n",
 					in->smmu_enabled,
-					pa,
-					NULL,
-					4,
-					false,
-					&va)) {
-			IPAERR("fail to create uc mapping RX rng RP\n");
-			result = -ENOMEM;
-			goto uc_timeout;
-		}
-		rx->rx_ring_rp_pa = va;
+					in->u.ul_smmu.rdy_comp_ring_size,
+					in->u.ul.rdy_comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_COMP_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_comp_ring_base_pa,
+						&in->u.ul_smmu.rdy_comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_comp_ring_size = len;
+			IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_base_pa_hi,
+					rx_2->rx_comp_ring_base_pa);
 
-		rx->ipa_pipe_number = ipa_ep_idx;
+			pa = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_wp_pa :
+				in->u.ul.rdy_comp_ring_wp_pa;
+			if (ipa_create_uc_smmu_mapping(
+						IPA_WDI_RX_COMP_RING_WP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_rng WP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_wp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_wp_pa_hi,
+					rx_2->rx_comp_ring_wp_pa);
+			rx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+				IPADBG("RX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.ul_smmu.rdy_ring_size,
+					in->u.ul.rdy_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_base_pa = va;
+			rx->rx_ring_size = len;
+
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_rp_pa = va;
+			rx->ipa_pipe_number = ipa_ep_idx;
+		}
+
 		if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
 			out->uc_door_bell_pa =
 				ipa_ctx->ipa_wrapper_base +
@@ -1128,6 +1388,7 @@
 	union IpaHwWdiCommonChCmdData_t disable;
 	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
 	u32 prod_hdl;
+	int i;
 
 	if (unlikely(!ipa_ctx)) {
 		IPAERR("IPA driver was not initialized\n");
@@ -1144,6 +1405,28 @@
 	if (result)
 		return result;
 
+	/* checking rdy_ring_rp_pa matches the rdy_comp_ring_wp_pa on WDI2.0 */
+	if (ipa_ctx->ipa_wdi2) {
+		for (i = 0; i < IPA_UC_FINISH_MAX; i++) {
+			IPADBG("(%d) rp_value(%u), comp_wp_value(%u)\n",
+					i,
+					*ipa_ctx->uc_ctx.rdy_ring_rp_va,
+					*ipa_ctx->uc_ctx.rdy_comp_ring_wp_va);
+			if (*ipa_ctx->uc_ctx.rdy_ring_rp_va !=
+				*ipa_ctx->uc_ctx.rdy_comp_ring_wp_va) {
+				usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+					IPA_UC_WAII_MAX_SLEEP);
+			} else {
+				break;
+			}
+		}
+		/* In case ipa_uc still haven't processed all
+		 * pending descriptors, we have to assert
+		 */
+		if (i == IPA_UC_FINISH_MAX)
+			ipa_assert();
+	}
+
 	IPADBG("ep=%d\n", clnt_hdl);
 
 	ep = &ipa_ctx->ep[clnt_hdl];
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 41f339a..02e2c5f 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -34,6 +34,8 @@
 #include <linux/rmnet_ipa_fd_ioctl.h>
 #include <linux/ipa.h>
 #include <uapi/linux/net_map.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <net/rmnet_config.h>
 
 #include "ipa_trace.h"
 
@@ -1232,6 +1234,81 @@
 
 }
 
+static int handle_ingress_format(struct net_device *dev,
+			struct rmnet_ioctl_extended_s *in)
+{
+	int ret = 0;
+	struct rmnet_phys_ep_conf_s *ep_cfg;
+
+	IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
+	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+		ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
+		   IPA_ENABLE_CS_OFFLOAD_DL;
+
+	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
+		IPAWANERR("get AGG size %d count %d\n",
+				  in->u.ingress_format.agg_size,
+				  in->u.ingress_format.agg_count);
+
+		ret = ipa_disable_apps_wan_cons_deaggr(
+			  in->u.ingress_format.agg_size,
+			  in->u.ingress_format.agg_count);
+
+		if (!ret) {
+			ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit =
+				in->u.ingress_format.agg_size;
+			ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
+				in->u.ingress_format.agg_count;
+
+			if (ipa_rmnet_res.ipa_napi_enable) {
+				ipa_to_apps_ep_cfg.recycle_enabled = true;
+				ep_cfg = (struct rmnet_phys_ep_conf_s *)
+				   rcu_dereference(dev->rx_handler_data);
+				ep_cfg->recycle = ipa_recycle_wan_skb;
+				pr_info("Wan Recycle Enabled\n");
+			}
+		}
+	}
+
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding =
+		true;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
+
+	ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
+	ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify;
+	ipa_to_apps_ep_cfg.priv = dev;
+
+	ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
+	if (ipa_to_apps_ep_cfg.napi_enabled)
+		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+	else
+		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
+	mutex_lock(&ipa_to_apps_pipe_handle_guard);
+	if (atomic_read(&is_ssr)) {
+		IPAWANDBG("In SSR sequence/recovery\n");
+		mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+		return -EFAULT;
+	}
+	ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
+	mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+
+	if (ret)
+		IPAWANERR("failed to configure ingress\n");
+
+	return ret;
+}
+
 /**
  * ipa_wwan_ioctl() - I/O control for wwan network driver.
  *
@@ -1532,83 +1609,7 @@
 			}
 			break;
 		case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/*  Set IDF  */
-			IPAWANDBG("get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
-			if ((extend_ioctl_data.u.data) &
-					RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
-				ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.
-					cs_offload_en =
-					IPA_ENABLE_CS_OFFLOAD_DL;
-
-			if ((extend_ioctl_data.u.data) &
-					RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
-				IPAWANERR("get AGG size %d count %d\n",
-					extend_ioctl_data.u.
-					ingress_format.agg_size,
-					extend_ioctl_data.u.
-					ingress_format.agg_count);
-				if (!ipa_disable_apps_wan_cons_deaggr(
-					extend_ioctl_data.u.
-					ingress_format.agg_size,
-					extend_ioctl_data.
-					u.ingress_format.agg_count)) {
-					ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.
-					aggr_byte_limit = extend_ioctl_data.
-					u.ingress_format.agg_size;
-					ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.
-					aggr_pkt_limit = extend_ioctl_data.
-					u.ingress_format.agg_count;
-				}
-			}
-
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
-				hdr_ofst_metadata_valid = 1;
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.
-				hdr.hdr_ofst_metadata = 1;
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
-				hdr_ofst_pkt_size_valid = 1;
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
-				hdr_ofst_pkt_size = 2;
-
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_total_len_or_pad_valid = true;
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_total_len_or_pad = 0;
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_payload_len_inc_padding = true;
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_total_len_or_pad_offset = 0;
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_little_endian = 0;
-			ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.
-				metadata_mask = 0xFF000000;
-
-			ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
-			ipa_to_apps_ep_cfg.notify =
-				apps_ipa_packet_receive_notify;
-			ipa_to_apps_ep_cfg.priv = dev;
-
-			ipa_to_apps_ep_cfg.napi_enabled =
-				ipa_rmnet_res.ipa_napi_enable;
-			if (ipa_to_apps_ep_cfg.napi_enabled)
-				ipa_to_apps_ep_cfg.desc_fifo_sz =
-					IPA_WAN_CONS_DESC_FIFO_SZ;
-			else
-				ipa_to_apps_ep_cfg.desc_fifo_sz =
-					IPA_SYS_DESC_FIFO_SZ;
-
-			mutex_lock(&ipa_to_apps_pipe_handle_guard);
-			if (atomic_read(&is_ssr)) {
-				IPAWANDBG("In SSR sequence/recovery\n");
-				mutex_unlock(&ipa_to_apps_pipe_handle_guard);
-				rc = -EFAULT;
-				break;
-			}
-			rc = ipa2_setup_sys_pipe(
-				&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
-			mutex_unlock(&ipa_to_apps_pipe_handle_guard);
-			if (rc)
-				IPAWANERR("failed to configure ingress\n");
+			rc = handle_ingress_format(dev, &extend_ioctl_data);
 			break;
 		case RMNET_IOCTL_SET_XLAT_DEV_INFO:
 			wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
@@ -2395,18 +2396,20 @@
 	}
 
 	rc = ipa_qmi_get_data_stats(&req, resp);
+	if (rc) {
+		IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
 
-	if (!rc) {
-		memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
-		msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
-		msg_meta.msg_len =
-			sizeof(struct ipa_get_data_stats_resp_msg_v01);
-		rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
-		if (rc) {
-			IPAWANERR("ipa2_send_msg failed: %d\n", rc);
-			kfree(resp);
-			return;
-		}
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+	msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
+	rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa2_send_msg failed: %d\n", rc);
+		kfree(resp);
+		return;
 	}
 }
 
@@ -2455,18 +2458,20 @@
 	req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;
 
 	rc = ipa_qmi_get_network_stats(&req, resp);
+	if (rc) {
+		IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc);
+		kfree(resp);
+		return;
+	}
 
-	if (!rc) {
-		memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
-		msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
-		msg_meta.msg_len =
-			sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
-		rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
-		if (rc) {
-			IPAWANERR("ipa2_send_msg failed: %d\n", rc);
-			kfree(resp);
-			return;
-		}
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
+	msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
+	rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa2_send_msg failed: %d\n", rc);
+		kfree(resp);
+		return;
 	}
 }
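
The two hunks above flatten the tethering-stats paths into early returns so
each failure leg logs, frees the QMI response exactly once, and leaves the
function. A minimal userspace sketch of that single-owner, early-return shape
(the helpers below are stand-ins, not the driver's QMI calls):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct stats_resp { char payload[64]; };

/* Stand-ins for the QMI query and the message-send step. */
static int query_stats(struct stats_resp *r)
{
	strncpy(r->payload, "rx=42 tx=17", sizeof(r->payload) - 1);
	return 0;
}

static int publish_stats(struct stats_resp *r)
{
	return printf("stats: %s\n", r->payload) > 0 ? 0 : -1;
}

static void report_stats(void)
{
	struct stats_resp *resp = calloc(1, sizeof(*resp));

	if (!resp)
		return;

	if (query_stats(resp)) {
		fprintf(stderr, "query failed\n");
		free(resp);		/* every early exit frees exactly once */
		return;
	}

	if (publish_stats(resp)) {
		fprintf(stderr, "publish failed\n");
		free(resp);
		return;
	}

	free(resp);	/* in the driver, success hands the buffer to a free callback */
}

int main(void)
{
	report_stats();
	return 0;
}
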
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 15e7d47..043d2a2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -38,6 +38,16 @@
 #include <linux/hash.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/smem.h>
+#include <soc/qcom/scm.h>
+
+#ifdef CONFIG_ARM64
+
+/* Outer caches unsupported on ARM64 platforms */
+#define outer_flush_range(x, y)
+#define __cpuc_flush_dcache_area __flush_dcache_area
+
+#endif
+
 #define IPA_SUBSYSTEM_NAME "ipa_fws"
 #include "ipa_i.h"
 #include "../ipa_rm_i.h"
@@ -62,7 +72,7 @@
 
 #define IPA_AGGR_MAX_STR_LENGTH (10)
 
-#define CLEANUP_TAG_PROCESS_TIMEOUT 150
+#define CLEANUP_TAG_PROCESS_TIMEOUT 500
 
 #define IPA_AGGR_STR_IN_BYTES(str) \
 	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
@@ -199,6 +209,21 @@
 };
 #endif
 
+#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
+#define TZ_MEM_PROTECT_REGION_ID 0x10
+
+struct tz_smmu_ipa_protect_region_iovec_s {
+	u64 input_addr;
+	u64 output_addr;
+	u64 size;
+	u32 attr;
+} __packed;
+
+struct tz_smmu_ipa_protect_region_s {
+	phys_addr_t iovec_buf;
+	u32 size_bytes;
+} __packed;
+
 static void ipa3_start_tag_process(struct work_struct *work);
 static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
 
@@ -226,7 +251,6 @@
 static struct {
 	bool present;
 	bool arm_smmu;
-	bool disable_htw;
 	bool fast_map;
 	bool s1_bypass;
 	bool use_64_bit_dma_mask;
@@ -596,6 +620,7 @@
 	struct ipa_ioc_v4_nat_del nat_del;
 	struct ipa_ioc_rm_dependency rm_depend;
 	size_t sz;
+	int pre_entry;
 
 	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
 
@@ -649,11 +674,11 @@
 			retval = -EFAULT;
 			break;
 		}
-
+		pre_entry =
+			((struct ipa_ioc_nat_dma_cmd *)header)->entries;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_nat_dma_cmd) +
-		   ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
-		   sizeof(struct ipa_ioc_nat_dma_one);
+		   pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -664,7 +689,15 @@
 			retval = -EFAULT;
 			break;
 		}
-
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_nat_dma_cmd *)param)->entries,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
 			retval = -EFAULT;
 			break;
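
The pre_entry pattern repeated throughout this ioctl handler is a double-fetch
guard: the element count is read once from the user header to size the kernel
buffer, then read again after the full copy, and the two values must agree or
the request is rejected. A condensed userspace sketch of the idea (the struct
layout and the copy helper are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cmd_hdr { uint32_t entries; };
struct cmd_one { uint32_t data; };

/* Stand-in for copy_from_user(); a real user buffer could change between calls. */
static int copy_from_user_sim(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static int handle_cmd(const void *ubuf)
{
	struct cmd_hdr hdr;
	struct cmd_hdr *full;
	uint32_t pre_entry;
	size_t pyld_sz;

	if (copy_from_user_sim(&hdr, ubuf, sizeof(hdr)))
		return -1;
	pre_entry = hdr.entries;	/* first fetch sizes the allocation */
	pyld_sz = sizeof(hdr) + (size_t)pre_entry * sizeof(struct cmd_one);

	full = calloc(1, pyld_sz);
	if (!full)
		return -1;
	if (copy_from_user_sim(full, ubuf, pyld_sz)) {
		free(full);
		return -1;
	}
	if (full->entries != pre_entry) {	/* second fetch must agree */
		fprintf(stderr, "entry count changed under us\n");
		free(full);
		return -1;
	}
	/* ... now safe to walk full->entries elements ... */
	free(full);
	return 0;
}

int main(void)
{
	struct { struct cmd_hdr h; struct cmd_one e[2]; } buf = { {2}, {{1}, {2}} };

	return handle_cmd(&buf) ? 1 : 0;
}
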
@@ -689,10 +722,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr *)header)->num_hdrs;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_hdr) +
-		   ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
-		   sizeof(struct ipa_hdr_add);
+		   pre_entry * sizeof(struct ipa_hdr_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -702,6 +736,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
 			retval = -EFAULT;
 			break;
@@ -718,10 +761,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr *)header)->num_hdls;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_del_hdr) +
-		   ((struct ipa_ioc_del_hdr *)header)->num_hdls *
-		   sizeof(struct ipa_hdr_del);
+		   pre_entry * sizeof(struct ipa_hdr_del);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -731,6 +775,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_del_hdr((struct ipa_ioc_del_hdr *)param)) {
 			retval = -EFAULT;
 			break;
@@ -747,10 +800,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule *)header)->num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_rt_rule) +
-		   ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
-		   sizeof(struct ipa_rt_rule_add);
+		   pre_entry * sizeof(struct ipa_rt_rule_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -760,6 +814,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -776,10 +840,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_rt_rule_after) +
-		   ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules *
-		   sizeof(struct ipa_rt_rule_add);
+		   pre_entry * sizeof(struct ipa_rt_rule_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -789,6 +854,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
+			num_rules != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_rt_rule_after *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_add_rt_rule_after(
 			(struct ipa_ioc_add_rt_rule_after *)param)) {
 
@@ -807,10 +882,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_mdfy_rt_rule) +
-		   ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules *
-		   sizeof(struct ipa_rt_rule_mdfy);
+		   pre_entry * sizeof(struct ipa_rt_rule_mdfy);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -820,6 +896,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -836,10 +922,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_del_rt_rule) +
-		   ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
-		   sizeof(struct ipa_rt_rule_del);
+		   pre_entry * sizeof(struct ipa_rt_rule_del);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -849,6 +936,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -865,10 +961,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_flt_rule *)header)->num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_flt_rule) +
-		   ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
-		   sizeof(struct ipa_flt_rule_add);
+		   pre_entry * sizeof(struct ipa_flt_rule_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -878,6 +975,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -895,10 +1002,12 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_flt_rule_after *)header)->
+			num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_flt_rule_after) +
-		   ((struct ipa_ioc_add_flt_rule_after *)header)->num_rules *
-		   sizeof(struct ipa_flt_rule_add);
+		   pre_entry * sizeof(struct ipa_flt_rule_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -908,6 +1017,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
+			num_rules != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_flt_rule_after *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_add_flt_rule_after(
 				(struct ipa_ioc_add_flt_rule_after *)param)) {
 			retval = -EFAULT;
@@ -925,10 +1044,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_del_flt_rule) +
-		   ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
-		   sizeof(struct ipa_flt_rule_del);
+		   pre_entry * sizeof(struct ipa_flt_rule_del);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -938,6 +1058,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_flt_rule *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -954,10 +1084,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_mdfy_flt_rule) +
-		   ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules *
-		   sizeof(struct ipa_flt_rule_mdfy);
+		   pre_entry * sizeof(struct ipa_flt_rule_mdfy);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -967,6 +1098,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
 			retval = -EFAULT;
 			break;
@@ -1080,9 +1221,10 @@
 			retval = -EFAULT;
 			break;
 		}
-
-		pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
-				header)->num_tx_props *
+		pre_entry =
+			((struct ipa_ioc_query_intf_tx_props *)
+			header)->num_tx_props;
+		pyld_sz = sz + pre_entry *
 			sizeof(struct ipa_ioc_tx_intf_prop);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
@@ -1093,6 +1235,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
+			param)->num_tx_props
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_tx_props *)
+				param)->num_tx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_query_intf_tx_props(
 				(struct ipa_ioc_query_intf_tx_props *)param)) {
 			retval = -1;
@@ -1115,9 +1267,10 @@
 			retval = -EFAULT;
 			break;
 		}
-
-		pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
-				header)->num_rx_props *
+		pre_entry =
+			((struct ipa_ioc_query_intf_rx_props *)
+			header)->num_rx_props;
+		pyld_sz = sz + pre_entry *
 			sizeof(struct ipa_ioc_rx_intf_prop);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
@@ -1128,6 +1281,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
+			param)->num_rx_props != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_rx_props *)
+				param)->num_rx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_query_intf_rx_props(
 				(struct ipa_ioc_query_intf_rx_props *)param)) {
 			retval = -1;
@@ -1150,9 +1312,10 @@
 			retval = -EFAULT;
 			break;
 		}
-
-		pyld_sz = sz + ((struct ipa_ioc_query_intf_ext_props *)
-				header)->num_ext_props *
+		pre_entry =
+			((struct ipa_ioc_query_intf_ext_props *)
+			header)->num_ext_props;
+		pyld_sz = sz + pre_entry *
 			sizeof(struct ipa_ioc_ext_intf_prop);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
@@ -1163,6 +1326,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
+			param)->num_ext_props != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_ext_props *)
+				param)->num_ext_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_query_intf_ext_props(
 				(struct ipa_ioc_query_intf_ext_props *)param)) {
 			retval = -1;
@@ -1179,8 +1351,10 @@
 			retval = -EFAULT;
 			break;
 		}
-		pyld_sz = sizeof(struct ipa_msg_meta) +
+		pre_entry =
 		   ((struct ipa_msg_meta *)header)->msg_len;
+		pyld_sz = sizeof(struct ipa_msg_meta) +
+		   pre_entry;
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -1190,6 +1364,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_msg_meta *)param)->msg_len,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_pull_msg((struct ipa_msg_meta *)param,
 				 (char *)param + sizeof(struct ipa_msg_meta),
 				 ((struct ipa_msg_meta *)param)->msg_len) !=
@@ -1306,10 +1489,12 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr_proc_ctx *)
+			header)->num_proc_ctxs;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
-		   ((struct ipa_ioc_add_hdr_proc_ctx *)header)->num_proc_ctxs *
-		   sizeof(struct ipa_hdr_proc_ctx_add);
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -1319,6 +1504,15 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
+			param)->num_proc_ctxs != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr_proc_ctx *)
+				param)->num_proc_ctxs, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_add_hdr_proc_ctx(
 			(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
 			retval = -EFAULT;
@@ -1335,10 +1529,11 @@
 			retval = -EFAULT;
 			break;
 		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
 		pyld_sz =
 		   sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
-		   ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls *
-		   sizeof(struct ipa_hdr_proc_ctx_del);
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
 		param = kzalloc(pyld_sz, GFP_KERNEL);
 		if (!param) {
 			retval = -ENOMEM;
@@ -1348,6 +1543,16 @@
 			retval = -EFAULT;
 			break;
 		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
+			param)->num_hdls != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr_proc_ctx *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
 		if (ipa3_del_hdr_proc_ctx(
 			(struct ipa_ioc_del_hdr_proc_ctx *)param)) {
 			retval = -EFAULT;
@@ -2032,7 +2237,7 @@
 		}
 	}
 
-	/* Will wait 150msecs for IPA tag process completion */
+	/* Will wait 500msecs for IPA tag process completion */
 	retval = ipa3_tag_process(desc, num_descs,
 		msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
 	if (retval) {
@@ -3892,6 +4097,53 @@
 	return count;
 }
 
+static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
+{
+	int i, size, ret, resp;
+	struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
+	struct tz_smmu_ipa_protect_region_s cmd_buf;
+
+	if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) {
+		size = ipa3_ctx->ipa_tz_unlock_reg_num *
+			sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
+		ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
+		if (ipa_tz_unlock_vec == NULL)
+			return -ENOMEM;
+
+		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
+			ipa_tz_unlock_vec[i].input_addr =
+				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
+				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
+				0xFFF);
+			ipa_tz_unlock_vec[i].output_addr =
+				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
+				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
+				0xFFF);
+			ipa_tz_unlock_vec[i].size =
+				ipa3_ctx->ipa_tz_unlock_reg[i].size;
+			ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
+		}
+
+		/* pass physical address of command buffer */
+		cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
+		cmd_buf.size_bytes = size;
+
+		/* flush cache to DDR */
+		__cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
+		outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);
+
+		ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID, &cmd_buf,
+				sizeof(cmd_buf), &resp, sizeof(resp));
+		if (ret) {
+			IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
+			kfree(ipa_tz_unlock_vec);
+			return -EFAULT;
+		}
+		kfree(ipa_tz_unlock_vec);
+	}
+	return 0;
+}
+
 /**
 * ipa3_pre_init() - Initialize the IPA Driver.
 * This part contains all initialization which doesn't require IPA HW, such
@@ -3976,6 +4228,27 @@
 	ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
 	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
 	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
+	if (resource_p->ipa_tz_unlock_reg) {
+		ipa3_ctx->ipa_tz_unlock_reg_num =
+			resource_p->ipa_tz_unlock_reg_num;
+		ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
+			ipa3_ctx->ipa_tz_unlock_reg_num,
+			sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
+			GFP_KERNEL);
+		if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
+			result = -ENOMEM;
+			goto fail_tz_unlock_reg;
+		}
+		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
+			ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
+				resource_p->ipa_tz_unlock_reg[i].reg_addr;
+			ipa3_ctx->ipa_tz_unlock_reg[i].size =
+				resource_p->ipa_tz_unlock_reg[i].size;
+		}
+	}
+
+	/* unlock registers for uc */
+	ipa3_tz_unlock_reg(ipa3_ctx);
 
 	/* default aggregation parameters */
 	ipa3_ctx->aggregation_type = IPA_MBIM_16;
@@ -4425,6 +4698,8 @@
 fail_bind:
 	kfree(ipa3_ctx->ctrl);
 fail_mem_ctrl:
+	kfree(ipa3_ctx->ipa_tz_unlock_reg);
+fail_tz_unlock_reg:
 	ipc_log_context_destroy(ipa3_ctx->logbuf);
 fail_logbuf:
 	kfree(ipa3_ctx);
@@ -4436,8 +4711,10 @@
 static int get_ipa_dts_configuration(struct platform_device *pdev,
 		struct ipa3_plat_drv_res *ipa_drv_res)
 {
-	int result;
+	int i, result, pos;
 	struct resource *resource;
+	u32 *ipa_tz_unlock_reg;
+	int elem_num;
 
 	/* initialize ipa3_res */
 	ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
@@ -4452,9 +4729,8 @@
 	ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
 	ipa_drv_res->apply_rg10_wa = false;
 	ipa_drv_res->gsi_ch20_wa = false;
-
-	smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
-			"qcom,smmu-disable-htw");
+	ipa_drv_res->ipa_tz_unlock_reg_num = 0;
+	ipa_drv_res->ipa_tz_unlock_reg = NULL;
 
 	/* Get IPA HW Version */
 	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
@@ -4665,13 +4941,52 @@
 		ipa_drv_res->apply_rg10_wa
 		? "Needed" : "Not needed");
 
+	elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
+		"qcom,ipa-tz-unlock-reg", sizeof(u32));
+
+	if (elem_num > 0 && elem_num % 2 == 0) {
+		ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
+
+		ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
+		if (ipa_tz_unlock_reg == NULL)
+			return -ENOMEM;
+
+		ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
+			ipa_drv_res->ipa_tz_unlock_reg_num,
+			sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
+			GFP_KERNEL);
+		if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
+			kfree(ipa_tz_unlock_reg);
+			return -ENOMEM;
+		}
+
+		if (of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
+			elem_num)) {
+			IPAERR("failed to read register addresses\n");
+			kfree(ipa_tz_unlock_reg);
+			kfree(ipa_drv_res->ipa_tz_unlock_reg);
+			return -EFAULT;
+		}
+
+		pos = 0;
+		for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
+			ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
+				ipa_tz_unlock_reg[pos++];
+			ipa_drv_res->ipa_tz_unlock_reg[i].size =
+				ipa_tz_unlock_reg[pos++];
+			IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i,
+				&ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
+				ipa_drv_res->ipa_tz_unlock_reg[i].size);
+		}
+		kfree(ipa_tz_unlock_reg);
+	}
 	return 0;
 }
 
 static int ipa_smmu_wlan_cb_probe(struct device *dev)
 {
 	struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
-	int disable_htw = 1;
 	int atomic_ctx = 1;
 	int fast = 1;
 	int bypass = 1;
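
The qcom,ipa-tz-unlock-reg property is parsed a few lines up as a flat list of
u32 cells consumed in pairs: even cells are register addresses, odd cells the
corresponding sizes, and an odd cell count is rejected. The pairing step in
isolation, with a fixed array standing in for the devicetree read (values are
made up for the example):

#include <stdint.h>
#include <stdio.h>

struct unlock_reg { uint64_t reg_addr; uint32_t size; };

/* Split a flat cell array into (address, size) pairs; an odd cell count
 * means the property is malformed and is rejected. */
static int parse_unlock_cells(const uint32_t *cells, int elem_num,
			      struct unlock_reg *out, int max_pairs)
{
	int i, pos = 0;

	if (elem_num <= 0 || elem_num % 2 != 0 || elem_num / 2 > max_pairs)
		return -1;

	for (i = 0; i < elem_num / 2; i++) {
		out[i].reg_addr = cells[pos++];
		out[i].size = cells[pos++];
	}
	return elem_num / 2;
}

int main(void)
{
	/* what a property like <addr0 size0 addr1 size1> would yield */
	const uint32_t cells[] = { 0x010a2000, 0x1000, 0x010c0000, 0x2000 };
	struct unlock_reg regs[4];
	int i, n = parse_unlock_cells(cells, 4, regs, 4);

	for (i = 0; i < n; i++)
		printf("tz unlock reg %d: addr 0x%llx size %u\n", i,
		       (unsigned long long)regs[i].reg_addr,
		       (unsigned)regs[i].size);
	return n < 0;
}
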
@@ -4691,17 +5006,6 @@
 	}
 	cb->valid = true;
 
-	if (smmu_info.disable_htw) {
-		ret = iommu_domain_set_attr(cb->iommu,
-			DOMAIN_ATTR_COHERENT_HTW_DISABLE,
-			&disable_htw);
-		if (ret) {
-			IPAERR("couldn't disable coherent HTW\n");
-			cb->valid = false;
-			return -EIO;
-		}
-	}
-
 	if (smmu_info.s1_bypass) {
 		if (iommu_domain_set_attr(cb->iommu,
 					DOMAIN_ATTR_S1_BYPASS,
@@ -4774,7 +5078,6 @@
 static int ipa_smmu_uc_cb_probe(struct device *dev)
 {
 	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
-	int disable_htw = 1;
 	int atomic_ctx = 1;
 	int bypass = 1;
 	int fast = 1;
@@ -4820,18 +5123,6 @@
 	IPADBG("SMMU mapping created\n");
 	cb->valid = true;
 
-	IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
-	if (smmu_info.disable_htw) {
-		if (iommu_domain_set_attr(cb->mapping->domain,
-				DOMAIN_ATTR_COHERENT_HTW_DISABLE,
-				 &disable_htw)) {
-			IPAERR("couldn't disable coherent HTW\n");
-			arm_iommu_release_mapping(cb->mapping);
-			cb->valid = false;
-			return -EIO;
-		}
-	}
-
 	IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
 	if (smmu_info.s1_bypass) {
 		if (iommu_domain_set_attr(cb->mapping->domain,
@@ -4886,7 +5177,6 @@
 {
 	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
 	int result;
-	int disable_htw = 1;
 	int atomic_ctx = 1;
 	int fast = 1;
 	int bypass = 1;
@@ -4934,17 +5224,6 @@
 	IPADBG("SMMU mapping created\n");
 	cb->valid = true;
 
-	if (smmu_info.disable_htw) {
-		if (iommu_domain_set_attr(cb->mapping->domain,
-				DOMAIN_ATTR_COHERENT_HTW_DISABLE,
-				 &disable_htw)) {
-			IPAERR("couldn't disable coherent HTW\n");
-			arm_iommu_release_mapping(cb->mapping);
-			cb->valid = false;
-			return -EIO;
-		}
-		IPADBG("SMMU disable HTW\n");
-	}
 	if (smmu_info.s1_bypass) {
 		if (iommu_domain_set_attr(cb->mapping->domain,
 				DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 2368797..5912d3f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1478,6 +1478,7 @@
 	u16 enable, tbl_entry, flag;
 	u32 no_entrys = 0;
 
+	mutex_lock(&ipa3_ctx->nat_mem.lock);
 	value = ipa3_ctx->nat_mem.public_ip_addr;
 	pr_err(
 				"Table IP Address:%d.%d.%d.%d\n",
@@ -1631,6 +1632,7 @@
 		}
 	}
 	pr_err("Current No. Nat Entries: %d\n", no_entrys);
+	mutex_unlock(&ipa3_ctx->nat_mem.lock);
 
 	return 0;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index ec3334c..7ac26e6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -778,10 +778,28 @@
  */
 int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
 {
+	return ipa3_send_cmd_timeout(num_desc, descr, 0);
+}
+
+/**
+ * ipa3_send_cmd_timeout - send immediate commands with limited time
+ *	waiting for ACK from IPA HW
+ * @num_desc:	number of descriptors within the desc struct
+ * @descr:	descriptor structure
+ * @timeout:	milliseconds to wait for an ACK from IPA HW
+ *
+ * Function will block until the command gets an ACK from IPA HW or the
+ * timeout expires. The caller needs to free any resources it allocated
+ * after the function returns. The callback in ipa3_desc should not be
+ * set by the caller for this function.
+ */
+int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
+{
 	struct ipa3_desc *desc;
 	int i, result = 0;
 	struct ipa3_sys_context *sys;
 	int ep_idx;
+	int completed;
 
 	for (i = 0; i < num_desc; i++)
 		IPADBG("sending imm cmd %d\n", descr[i].opcode);
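
ipa3_send_cmd_timeout() keeps the old blocking behaviour when timeout is 0 and
otherwise bounds the wait for the hardware ACK, logging and moving on if the
ACK never arrives. A userspace analogue of that wait-forever-or-wait-up-to-N-ms
split, built on a pthread condition variable rather than a kernel completion
(illustrative only; build with cc -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ack_waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

/* Wait for ack_waiter::done; timeout_ms == 0 means wait indefinitely. */
static bool wait_for_ack(struct ack_waiter *w, unsigned int timeout_ms)
{
	bool ok = true;

	pthread_mutex_lock(&w->lock);
	if (timeout_ms == 0) {
		while (!w->done)
			pthread_cond_wait(&w->cond, &w->lock);
	} else {
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += timeout_ms / 1000;
		ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		while (!w->done && ok) {
			if (pthread_cond_timedwait(&w->cond, &w->lock,
						   &ts) == ETIMEDOUT)
				ok = false;	/* give up waiting, like the driver logs and moves on */
		}
	}
	pthread_mutex_unlock(&w->lock);
	return ok;
}

int main(void)
{
	struct ack_waiter w = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
	};

	/* nobody signals, so a 10 ms bounded wait simply times out */
	printf("acked=%d\n", wait_for_ack(&w, 10));
	return 0;
}
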
@@ -808,7 +826,14 @@
 			result = -EFAULT;
 			goto bail;
 		}
-		wait_for_completion(&descr->xfer_done);
+		if (timeout) {
+			completed = wait_for_completion_timeout(
+				&descr->xfer_done, msecs_to_jiffies(timeout));
+			if (!completed)
+				IPADBG("timeout waiting for imm-cmd ACK\n");
+		} else {
+			wait_for_completion(&descr->xfer_done);
+		}
 	} else {
 		desc = &descr[num_desc - 1];
 		init_completion(&desc->xfer_done);
@@ -823,7 +848,15 @@
 			result = -EFAULT;
 			goto bail;
 		}
-		wait_for_completion(&desc->xfer_done);
+		if (timeout) {
+			completed = wait_for_completion_timeout(
+				&desc->xfer_done, msecs_to_jiffies(timeout));
+			if (!completed)
+				IPADBG("timeout waiting for imm-cmd ACK\n");
+		} else {
+			wait_for_completion(&desc->xfer_done);
+		}
+
 	}
 
 bail:
@@ -3181,22 +3214,20 @@
 					IPA_CLIENT_APPS_WAN_CONS) {
 				sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
 				sys->free_rx_wrapper = ipa3_free_rx_wrapper;
-				if (in->napi_enabled) {
+				sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size;
+				if (nr_cpu_ids > 1) {
 					sys->repl_hdlr =
-					   ipa3_replenish_rx_cache_recycle;
+					   ipa3_fast_replenish_rx_cache;
+				} else {
+					sys->repl_hdlr =
+					   ipa3_replenish_rx_cache;
+				}
+				if (in->napi_enabled)
 					sys->rx_pool_sz =
 					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
-				} else {
-					if (nr_cpu_ids > 1) {
-						sys->repl_hdlr =
-						   ipa3_fast_replenish_rx_cache;
-					} else {
-						sys->repl_hdlr =
-						   ipa3_replenish_rx_cache;
-					}
-					sys->rx_pool_sz =
-					   ipa3_ctx->wan_rx_ring_size;
-				}
+				if (in->napi_enabled && in->recycle_enabled)
+					sys->repl_hdlr =
+					 ipa3_replenish_rx_cache_recycle;
 				in->ipa_ep_cfg.aggr.aggr_sw_eof_active
 					= true;
 				if (ipa3_ctx->
@@ -3837,6 +3868,7 @@
 	union __packed gsi_channel_scratch ch_scratch;
 	struct ipa_gsi_ep_config *gsi_ep_info;
 	dma_addr_t dma_addr;
+	dma_addr_t evt_dma_addr;
 	int result;
 
 	if (!ep) {
@@ -3845,13 +3877,13 @@
 	}
 
 	ep->gsi_evt_ring_hdl = ~0;
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
 	/*
 	 * allocate event ring for all interrupt-policy
 	 * pipes and IPA consumers pipes
 	 */
 	if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
 	     IPA_CLIENT_IS_CONS(ep->client)) {
-		memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
 		gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
 		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
 		gsi_evt_ring_props.re_size =
@@ -3860,8 +3892,13 @@
 		gsi_evt_ring_props.ring_len = IPA_GSI_EVT_RING_LEN;
 		gsi_evt_ring_props.ring_base_vaddr =
 			dma_alloc_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
-			&dma_addr, 0);
-		gsi_evt_ring_props.ring_base_addr = dma_addr;
+			&evt_dma_addr, GFP_KERNEL);
+		if (!gsi_evt_ring_props.ring_base_vaddr) {
+			IPAERR("fail to dma alloc %u bytes\n",
+				IPA_GSI_EVT_RING_LEN);
+			return -ENOMEM;
+		}
+		gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
 
 		/* copy mem info */
 		ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
@@ -3896,7 +3933,7 @@
 	if (!gsi_ep_info) {
 		IPAERR("Invalid ep number\n");
 		result = -EINVAL;
-		goto fail_alloc_evt_ring;
+		goto fail_get_gsi_ep_info;
 	} else
 		gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
 
@@ -3915,7 +3952,12 @@
 		gsi_channel_props.ring_len = 2 * in->desc_fifo_sz;
 	gsi_channel_props.ring_base_vaddr =
 		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
-			&dma_addr, 0);
+			&dma_addr, GFP_KERNEL);
+	if (!gsi_channel_props.ring_base_vaddr) {
+		IPAERR("fail to dma alloc %u bytes\n",
+			gsi_channel_props.ring_len);
+		result = -ENOMEM;
+		goto fail_alloc_channel_ring;
+	}
 	gsi_channel_props.ring_base_addr = dma_addr;
 
 	/* copy mem info */
@@ -3951,7 +3993,7 @@
 	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("failed to write scratch %d\n", result);
-		goto fail_start_channel;
+		goto fail_write_channel_scratch;
 	}
 
 	result = gsi_start_channel(ep->gsi_chan_hdl);
@@ -3963,17 +4005,25 @@
 	return 0;
 
 fail_start_channel:
+fail_write_channel_scratch:
 	if (gsi_dealloc_channel(ep->gsi_chan_hdl)
 		!= GSI_STATUS_SUCCESS) {
 		IPAERR("Failed to dealloc GSI chan.\n");
 		BUG();
 	}
 fail_alloc_channel:
+	dma_free_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+			gsi_channel_props.ring_base_vaddr, dma_addr);
+fail_alloc_channel_ring:
+fail_get_gsi_ep_info:
 	if (ep->gsi_evt_ring_hdl != ~0) {
 		gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
 		ep->gsi_evt_ring_hdl = ~0;
 	}
 fail_alloc_evt_ring:
+	if (gsi_evt_ring_props.ring_base_vaddr)
+		dma_free_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
+			gsi_evt_ring_props.ring_base_vaddr, evt_dma_addr);
 	IPAERR("Return with err: %d\n", result);
 	return result;
 }
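
The relabelled error path in ipa3_setup_gsi_channel() follows the usual kernel
unwind idiom: each setup stage gets its own label, and a failure jumps to the
label that releases only what was already acquired, in reverse order. A compact
sketch of the idiom, with plain heap buffers standing in for the event ring,
channel ring and scratch write:

#include <stdio.h>
#include <stdlib.h>

/* Each stage acquires one resource; a failure unwinds, in reverse order,
 * only the stages that already succeeded. */
static int setup_channel(int fail_at_stage)
{
	void *evt_ring = NULL, *ch_ring = NULL, *scratch = NULL;

	evt_ring = malloc(64);			/* stage 1: event ring */
	if (!evt_ring || fail_at_stage == 1)
		goto fail_evt_ring;

	ch_ring = malloc(64);			/* stage 2: channel ring */
	if (!ch_ring || fail_at_stage == 2)
		goto fail_ch_ring;

	scratch = malloc(16);			/* stage 3: scratch area */
	if (!scratch || fail_at_stage == 3)
		goto fail_scratch;

	/* success: a real driver keeps these; the demo just frees them */
	free(scratch);
	free(ch_ring);
	free(evt_ring);
	return 0;

fail_scratch:
	free(scratch);
fail_ch_ring:
	free(ch_ring);
fail_evt_ring:
	free(evt_ring);
	fprintf(stderr, "setup failed at stage %d\n", fail_at_stage);
	return -1;
}

int main(void)
{
	setup_channel(2);	/* exercise the stage-2 failure path */
	return 0;
}
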
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index e7af53f..340bacab0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -136,7 +136,7 @@
 			IPAERR("failed to calculate HW FLT rule size\n");
 			return -EPERM;
 		}
-		IPADBG("pipe %d rule_id (handle) %u hw_len %d priority %u\n",
+		IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n",
 			pipe_idx, entry->rule_id, entry->hw_len, entry->prio);
 
 		if (entry->rule.hashable)
@@ -1372,18 +1372,18 @@
 
 	mutex_lock(&ipa3_ctx->lock);
 	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
-	tbl->sticky_rear = true;
 	rule.action = IPA_PASS_TO_EXCEPTION;
-	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, false,
+	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
 			&ep->dflt_flt4_rule_hdl);
 	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+	tbl->sticky_rear = true;
 
 	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
-	tbl->sticky_rear = true;
 	rule.action = IPA_PASS_TO_EXCEPTION;
-	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, false,
+	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
 			&ep->dflt_flt6_rule_hdl);
 	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+	tbl->sticky_rear = true;
 	mutex_unlock(&ipa3_ctx->lock);
 }
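
The reordering above installs the default exception rule at the rear first and
only then marks the table sticky_rear, so every rule added later is placed in
front of the catch-all instead of behind it. The placement policy reduced to an
array of rule names (purely illustrative, not the driver's list handling):

#include <stdbool.h>
#include <stdio.h>

#define MAX_RULES 8

struct flt_tbl {
	const char *rules[MAX_RULES];
	int cnt;
	bool sticky_rear;	/* last slot is pinned once set */
};

static int add_rule(struct flt_tbl *t, const char *name, bool at_rear)
{
	int pos, i;

	if (t->cnt == MAX_RULES)
		return -1;

	if (!at_rear)
		pos = 0;			/* head */
	else if (t->sticky_rear && t->cnt > 0)
		pos = t->cnt - 1;		/* just before the pinned default */
	else
		pos = t->cnt;			/* true rear */

	for (i = t->cnt; i > pos; i--)
		t->rules[i] = t->rules[i - 1];
	t->rules[pos] = name;
	t->cnt++;
	return 0;
}

int main(void)
{
	struct flt_tbl t = { {0}, 0, false };
	int i;

	add_rule(&t, "default-exception", true);	/* install catch-all last */
	t.sticky_rear = true;				/* then pin the rear */
	add_rule(&t, "wlan-rule", true);
	add_rule(&t, "usb-rule", true);

	for (i = 0; i < t.cnt; i++)
		printf("%d: %s\n", i, t.rules[i]);	/* default stays at the end */
	return 0;
}
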
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index da52b26..6274579 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -326,7 +326,7 @@
 	int needed_len;
 	int mem_size;
 
-	IPADBG_LOW("processing type %d hdr_hdl %d\n",
+	IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
 		proc_ctx->type, proc_ctx->hdr_hdl);
 
 	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
@@ -335,10 +335,17 @@
 	}
 
 	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
-	if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
+	if (!hdr_entry) {
 		IPAERR("hdr_hdl is invalid\n");
 		return -EINVAL;
 	}
+	if (hdr_entry->cookie != IPA_COOKIE) {
+		IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
+		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);
 
 	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
 	if (!entry) {
@@ -403,7 +410,7 @@
 	entry->offset_entry = offset;
 	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
 	htbl->proc_ctx_cnt++;
-	IPADBG_LOW("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
+	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
 			htbl->proc_ctx_cnt, offset->offset);
 
 	id = ipa3_id_alloc(entry);
@@ -520,12 +527,12 @@
 	list_add(&entry->link, &htbl->head_hdr_entry_list);
 	htbl->hdr_cnt++;
 	if (entry->is_hdr_proc_ctx)
-		IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+		IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
 			hdr->hdr_len,
 			htbl->hdr_cnt,
 			&entry->phys_base);
 	else
-		IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
+		IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
 			hdr->hdr_len,
 			htbl->hdr_cnt,
 			entry->offset_entry->offset);
@@ -580,7 +587,7 @@
 		return -EINVAL;
 	}
 
-	IPADBG("del ctx proc cnt=%d ofst=%d\n",
+	IPADBG("del proc ctx cnt=%d ofst=%d\n",
 		htbl->proc_ctx_cnt, entry->offset_entry->offset);
 
 	if (--entry->ref_cnt) {
@@ -624,11 +631,12 @@
 	}
 
 	if (entry->is_hdr_proc_ctx)
-		IPADBG("del hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+		IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
 			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
 	else
-		IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
-			htbl->hdr_cnt, entry->offset_entry->offset);
+		IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
+			entry->hdr_len, htbl->hdr_cnt,
+			entry->offset_entry->offset);
 
 	if (--entry->ref_cnt) {
 		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 23fb2ae..db1f52c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -43,8 +43,6 @@
 #define MTU_BYTE 1500
 
 #define IPA3_MAX_NUM_PIPES 31
-#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
 #define IPA_SYS_DESC_FIFO_SZ 0x800
 #define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
 #define IPA_LAN_RX_HEADER_LENGTH (2)
@@ -55,6 +53,8 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
+#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
@@ -481,7 +481,7 @@
 
 struct ipa3_status_stats {
 	struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
-	int curr;
+	unsigned int curr;
 };
 
 /**
@@ -1015,6 +1015,11 @@
 	void *user_data;
 };
 
+struct ipa_tz_unlock_reg_info {
+	u64 reg_addr;
+	u32 size;
+};
+
 /**
  * struct ipa3_context - IPA context
  * @class: pointer to the struct class
@@ -1232,6 +1237,8 @@
 	struct completion init_completion_obj;
 	struct completion uc_loaded_completion_obj;
 	struct ipa3_smp2p_info smp2p_info;
+	u32 ipa_tz_unlock_reg_num;
+	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
 };
 
 /**
@@ -1270,6 +1277,8 @@
 	bool apply_rg10_wa;
 	bool gsi_ch20_wa;
 	bool tethered_flow_control;
+	u32 ipa_tz_unlock_reg_num;
+	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
 };
 
 /**
@@ -1830,6 +1839,7 @@
 int ipa3_controller_static_bind(struct ipa3_controller *controller,
 		enum ipa_hw_type ipa_hw_type);
 int ipa3_cfg_route(struct ipahal_reg_route *route);
+int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout);
 int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr);
 int ipa3_cfg_filter(u32 disable);
 int ipa3_pipe_mem_init(u32 start_ofst, u32 size);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 32c5004..b9f5755 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -358,6 +358,11 @@
 	return result;
 }
 
+static void ipa3_send_msg_free(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
 /**
  * ipa3_send_msg() - Send "message" from kernel client to IPA driver
  * @meta: [in] message meta-data
@@ -377,6 +382,7 @@
 		  ipa_msg_free_fn callback)
 {
 	struct ipa3_push_msg *msg;
+	void *data = NULL;
 
 	if (meta == NULL || (buff == NULL && callback != NULL) ||
 	    (buff != NULL && callback == NULL)) {
@@ -397,8 +403,17 @@
 	}
 
 	msg->meta = *meta;
-	msg->buff = buff;
-	msg->callback = callback;
+	if (meta->msg_len > 0 && buff) {
+		data = kmalloc(meta->msg_len, GFP_KERNEL);
+		if (data == NULL) {
+			IPAERR("fail to alloc data container\n");
+			kfree(msg);
+			return -ENOMEM;
+		}
+		memcpy(data, buff, meta->msg_len);
+		msg->buff = data;
+		msg->callback = ipa3_send_msg_free;
+	}
 
 	mutex_lock(&ipa3_ctx->msg_lock);
 	list_add_tail(&msg->link, &ipa3_ctx->msg_list);
@@ -406,6 +421,8 @@
 	IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]);
 
 	wake_up(&ipa3_ctx->msg_waitq);
+	if (buff)
+		callback(buff, meta->msg_len, meta->msg_type);
 
 	return 0;
 }
@@ -511,12 +528,12 @@
 	start = buf;
 
 	while (1) {
+		mutex_lock(&ipa3_ctx->msg_lock);
+		locked = 1;
 		prepare_to_wait(&ipa3_ctx->msg_waitq,
 				&wait,
 				TASK_INTERRUPTIBLE);
 
-		mutex_lock(&ipa3_ctx->msg_lock);
-		locked = 1;
 		if (!list_empty(&ipa3_ctx->msg_list)) {
 			msg = list_first_entry(&ipa3_ctx->msg_list,
 					struct ipa3_push_msg, link);
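
With the allocation added to ipa3_send_msg(), the driver queues its own copy of
the payload and immediately hands the caller's buffer back through the supplied
free callback, so callers no longer need to keep their buffer alive until
readers drain the message list. A minimal sketch of that copy-then-release
hand-off (types and names are invented for the example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*msg_free_fn)(void *buff, size_t len);

struct queued_msg {
	void *buff;	/* private copy owned by the "driver" */
	size_t len;
};

static void caller_free(void *buff, size_t len)
{
	(void)len;
	free(buff);	/* caller's buffer is released as soon as send returns */
}

/* Queue a copy of the payload and give the original straight back. */
static int send_msg(struct queued_msg *q, void *buff, size_t len,
		    msg_free_fn cb)
{
	if (len && buff) {
		q->buff = malloc(len);
		if (!q->buff)
			return -1;
		memcpy(q->buff, buff, len);
		q->len = len;
	}
	if (buff && cb)
		cb(buff, len);	/* the caller no longer owns the buffer */
	return 0;
}

int main(void)
{
	struct queued_msg q = { NULL, 0 };
	static const char text[] = "link up";
	char *payload = malloc(sizeof(text));

	if (!payload)
		return 1;
	memcpy(payload, text, sizeof(text));

	if (send_msg(&q, payload, sizeof(text), caller_free)) {
		free(payload);	/* on failure ownership stays with the caller */
		return 1;
	}
	printf("queued copy: %s\n", (char *)q.buff);
	free(q.buff);	/* reader side releases the driver's copy */
	return 0;
}
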
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 719eb2d..b49fb32 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -572,6 +572,8 @@
 	resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei;
 
 	pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
 			&resp_desc, &resp, sizeof(resp),
 			QMI_SEND_REQ_TIMEOUT_MS);
@@ -613,6 +615,8 @@
 	resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
 	resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei;
 
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
 			req,
 			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
@@ -650,6 +654,8 @@
 	resp_desc.ei_array =
 		ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei;
 
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt,
 			&req_desc,
 			req,
@@ -693,7 +699,8 @@
 	resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
 	resp_desc.ei_array =
 		ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei;
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt,
 			&req_desc,
 			req,
@@ -750,6 +757,8 @@
 	resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
 	resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei;
 
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt,
 			&req_desc,
 			req,
@@ -1160,6 +1169,8 @@
 
 	IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
 
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
 			sizeof(struct ipa_get_data_stats_req_msg_v01),
 			&resp_desc, resp,
@@ -1189,6 +1200,8 @@
 
 	IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
 
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
 			sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
 			&resp_desc, resp,
@@ -1220,7 +1233,8 @@
 	resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei;
 
 	IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
 			sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
 			&resp_desc, &resp, sizeof(resp),
@@ -1254,7 +1268,8 @@
 	resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei;
 
 	IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
-
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
 		&resp_desc, &resp, sizeof(resp),
 		QMI_SEND_STATS_REQ_TIMEOUT_MS);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 8930d92..273877c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -291,7 +291,7 @@
 			return -EPERM;
 		}
 
-		IPADBG("RT rule id (handle) %d hw_len %u priority %u\n",
+		IPADBG_LOW("RT rule id (handle) %d hw_len %u priority %u\n",
 			entry->id, entry->hw_len, entry->prio);
 
 		if (entry->rule.hashable)
@@ -644,23 +644,23 @@
 		goto fail_imm_cmd_construct;
 	}
 
-	IPADBG("Hashable HEAD\n");
+	IPADBG_LOW("Hashable HEAD\n");
 	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
 		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
 
-	IPADBG("Non-Hashable HEAD\n");
+	IPADBG_LOW("Non-Hashable HEAD\n");
 	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
 		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
 
 	if (alloc_params.hash_bdy.size) {
-		IPADBG("Hashable BODY\n");
+		IPADBG_LOW("Hashable BODY\n");
 		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
 			alloc_params.hash_bdy.phys_base,
 			alloc_params.hash_bdy.size);
 	}
 
 	if (alloc_params.nhash_bdy.size) {
-		IPADBG("Non-Hashable BODY\n");
+		IPADBG_LOW("Non-Hashable BODY\n");
 		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
 			alloc_params.nhash_bdy.phys_base,
 			alloc_params.nhash_bdy.size);
@@ -1212,8 +1212,9 @@
 		__ipa3_release_hdr_proc_ctx(entry->proc_ctx->id);
 	list_del(&entry->link);
 	entry->tbl->rule_cnt--;
-	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
-		entry->tbl->idx, entry->tbl->rule_cnt, entry->rule_id);
+	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d ref_cnt=%u\n",
+		entry->tbl->idx, entry->tbl->rule_cnt,
+		entry->rule_id, entry->tbl->ref_cnt);
 	idr_remove(&entry->tbl->rule_ids, entry->rule_id);
 	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
 		if (__ipa_del_rt_tbl(entry->tbl))
@@ -1490,6 +1491,8 @@
 
 	entry->ref_cnt--;
 	if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+		IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n",
+			entry->idx);
 		if (__ipa_del_rt_tbl(entry))
 			IPAERR("fail to del RT tbl\n");
 		/* commit for put */
@@ -1677,7 +1680,7 @@
 	u8 *rule_addr;
 	int rule_idx;
 
-	IPADBG("tbl_idx=%d ip_type=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+	IPADBG_LOW("tbl_idx=%d ip_t=%d hashable=%d entry=0x%p num_entry=0x%p\n",
 		tbl_idx, ip_type, hashable, entry, num_entry);
 
 	if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) {
@@ -1718,7 +1721,7 @@
 				IPA_MEM_PART(v6_rt_nhash_ofst);
 	}
 
-	IPADBG("hdr_base_ofst=0x%llx\n", hdr_base_ofst);
+	IPADBG_LOW("hdr_base_ofst=0x%llx\n", hdr_base_ofst);
 
 	res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
 		tbl_idx, &tbl_addr, &is_sys);
@@ -1726,7 +1729,7 @@
 		IPAERR("failed to read table address from header structure\n");
 		goto bail;
 	}
-	IPADBG("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n",
+	IPADBG_LOW("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n",
 		tbl_idx, tbl_addr, is_sys);
 	if (!tbl_addr) {
 		IPAERR("invalid rt tbl addr\n");
@@ -1762,7 +1765,7 @@
 		rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
 	}
 
-	IPADBG("First rule addr 0x%p\n", rule_addr);
+	IPADBG_LOW("First rule addr 0x%p\n", rule_addr);
 
 	if (!rule_addr) {
 		/* Modem table in system memory or empty table */
@@ -1778,7 +1781,7 @@
 			goto bail;
 		}
 
-		IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+		IPADBG_LOW("rule_size=%d\n", entry[rule_idx].rule_size);
 		if (!entry[rule_idx].rule_size)
 			break;
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index c3f3cfa..15449b4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -47,6 +47,8 @@
 #define IPA_EOT_COAL_GRAN_MIN (1)
 #define IPA_EOT_COAL_GRAN_MAX (16)
 
+#define IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC (15)
+
 #define IPA_AGGR_BYTE_LIMIT (\
 		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
 		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
@@ -101,7 +103,7 @@
 #define IPA_GROUP_DPL		IPA_GROUP_DL
 #define IPA_GROUP_DIAG		(2)
 #define IPA_GROUP_DMA		(3)
-#define IPA_GROUP_IMM_CMD	IPA_GROUP_DMA
+#define IPA_GROUP_IMM_CMD	IPA_GROUP_UL
 #define IPA_GROUP_Q6ZIP		(4)
 #define IPA_GROUP_Q6ZIP_GENERAL	IPA_GROUP_Q6ZIP
 #define IPA_GROUP_UC_RX_Q	(5)
@@ -3474,7 +3476,8 @@
 	desc.type = IPA_IMM_CMD_DESC;
 
 	IPADBG("sending 1B packet to IPA\n");
-	if (ipa3_send_cmd(1, &desc)) {
+	if (ipa3_send_cmd_timeout(1, &desc,
+		IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
 		IPAERR("ipa3_send_cmd failed\n");
 		return -EFAULT;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index c88b104..e39874e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1222,9 +1222,9 @@
 
 	if (!base ||
 		!hdr_len ||
-		(!phys_base && !hdr_base_addr) ||
-		!hdr_base_addr ||
-		((is_hdr_proc_ctx == false) && !offset_entry)) {
+		(is_hdr_proc_ctx && !phys_base) ||
+		(!is_hdr_proc_ctx && !offset_entry) ||
+		(!is_hdr_proc_ctx && !hdr_base_addr)) {
 		IPAHAL_ERR(
 			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
 			, hdr_len, &phys_base, hdr_base_addr
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index e355d9d..67b3cb3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -115,7 +115,7 @@
 
 static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys)
 {
-	IPAHAL_DBG("Parsing hwaddr 0x%llx\n", hwaddr);
+	IPAHAL_DBG_LOW("Parsing hwaddr 0x%llx\n", hwaddr);
 
 	*is_sys = !(hwaddr & 0x1);
 	hwaddr &= (~0ULL - 1);
@@ -254,7 +254,7 @@
 	}
 	rule_hdr->u.hdr.en_rule = en_rule;
 
-	IPAHAL_DBG("en_rule 0x%x\n", en_rule);
+	IPAHAL_DBG_LOW("en_rule 0x%x\n", en_rule);
 	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
 
 	if (*hw_len == 0) {
@@ -327,12 +327,12 @@
 	}
 	rule_hdr->u.hdr.en_rule = en_rule;
 
-	IPAHAL_DBG("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+	IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
 		en_rule,
 		rule_hdr->u.hdr.action,
 		rule_hdr->u.hdr.rt_tbl_idx,
 		rule_hdr->u.hdr.retain_hdr);
-	IPAHAL_DBG("priority=%d, rule_id=%d\n",
+	IPAHAL_DBG_LOW("priority=%d, rule_id=%d\n",
 		rule_hdr->u.hdr.priority,
 		rule_hdr->u.hdr.rule_id);
 
@@ -1152,25 +1152,25 @@
 	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
 	 */
 	if (attrib->attrib_mask == 0) {
-		IPAHAL_DBG("building default rule\n");
+		IPAHAL_DBG_LOW("building default rule\n");
 		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]);
 		extra_wrd_i = ipa_write_8(0, extra_wrd_i);  /* offset */
 		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* mask */
 		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* val */
 	}
 
-	IPAHAL_DBG("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
-	IPAHAL_DBG("extra_word_2 0x%llx\n",
+	IPAHAL_DBG_LOW("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
+	IPAHAL_DBG_LOW("extra_word_2 0x%llx\n",
 		*(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH));
 
 	extra_wrd_i = ipa_pad_to_64(extra_wrd_i);
 	sz = extra_wrd_i - extra_wrd_start;
-	IPAHAL_DBG("extra words params sz %d\n", sz);
+	IPAHAL_DBG_LOW("extra words params sz %d\n", sz);
 	*buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz);
 
 	rest_wrd_i = ipa_pad_to_64(rest_wrd_i);
 	sz = rest_wrd_i - rest_wrd_start;
-	IPAHAL_DBG("non extra words params sz %d\n", sz);
+	IPAHAL_DBG_LOW("non extra words params sz %d\n", sz);
 	*buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz);
 
 fail_err_check:
@@ -1208,7 +1208,7 @@
 	if (attrib->ihl_offset_eq_16_present)
 		num++;
 
-	IPAHAL_DBG("extra bytes number %d\n", num);
+	IPAHAL_DBG_LOW("extra bytes number %d\n", num);
 
 	return num;
 }
@@ -2024,7 +2024,7 @@
 
 	eq_bitmap = atrb->rule_eq_bitmap;
 
-	IPAHAL_DBG("eq_bitmap=0x%x\n", eq_bitmap);
+	IPAHAL_DBG_LOW("eq_bitmap=0x%x\n", eq_bitmap);
 
 	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ))
 		atrb->tos_eq_present = true;
@@ -2080,7 +2080,7 @@
 		extra = &dummy_extra_wrd;
 		rest = addr + hdr_sz;
 	}
-	IPAHAL_DBG("addr=0x%p extra=0x%p rest=0x%p\n", addr, extra, rest);
+	IPAHAL_DBG_LOW("addr=0x%p extra=0x%p rest=0x%p\n", addr, extra, rest);
 
 	if (atrb->tos_eq_present)
 		atrb->tos_eq = *extra++;
@@ -2182,13 +2182,13 @@
 		rest += 4;
 	}
 
-	IPAHAL_DBG("before rule alignment rest=0x%p\n", rest);
+	IPAHAL_DBG_LOW("before rule alignment rest=0x%p\n", rest);
 	rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) &
 		~IPA3_0_HW_RULE_START_ALIGNMENT);
-	IPAHAL_DBG("after rule alignment  rest=0x%p\n", rest);
+	IPAHAL_DBG_LOW("after rule alignment  rest=0x%p\n", rest);
 
 	*rule_size = rest - addr;
-	IPAHAL_DBG("rule_size=0x%x\n", *rule_size);
+	IPAHAL_DBG_LOW("rule_size=0x%x\n", *rule_size);
 
 	return 0;
 }
@@ -2198,12 +2198,12 @@
 	struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
 	struct ipa_ipfltri_rule_eq *atrb;
 
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr;
 	atrb = &rule->eq_attrib;
 
-	IPAHAL_DBG("read hdr 0x%llx\n", rule_hdr->u.word);
+	IPAHAL_DBG_LOW("read hdr 0x%llx\n", rule_hdr->u.word);
 
 	if (rule_hdr->u.word == 0) {
 		/* table terminator - empty table */
@@ -2235,7 +2235,7 @@
 	struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
 	struct ipa_ipfltri_rule_eq *atrb;
 
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr;
 	atrb = &rule->rule.eq_attrib;
@@ -2775,11 +2775,11 @@
 	/* The HAL allocates larger sizes than the given effective ones
 	 * for alignments and border indications
 	 */
-	IPAHAL_DBG("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
+	IPAHAL_DBG_LOW("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
 		params->total_sz_lcl_hash_tbls,
 		params->total_sz_lcl_nhash_tbls);
 
-	IPAHAL_DBG("lcl tbl bdy count: hash=%u nhash=%u\n",
+	IPAHAL_DBG_LOW("lcl tbl bdy count: hash=%u nhash=%u\n",
 		params->num_lcl_hash_tbls,
 		params->num_lcl_nhash_tbls);
 
@@ -2798,7 +2798,7 @@
 		params->nhash_bdy.size += obj->blk_sz_alignment;
 		params->nhash_bdy.size &= ~(obj->blk_sz_alignment);
 
-		IPAHAL_DBG("nhash lcl tbl bdy total h/w size = %u\n",
+		IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n",
 			params->nhash_bdy.size);
 
 		params->nhash_bdy.base = dma_alloc_coherent(
@@ -2829,7 +2829,7 @@
 		params->hash_bdy.size += obj->blk_sz_alignment;
 		params->hash_bdy.size &= ~(obj->blk_sz_alignment);
 
-		IPAHAL_DBG("hash lcl tbl bdy total h/w size = %u\n",
+		IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n",
 			params->hash_bdy.size);
 
 		params->hash_bdy.base = dma_alloc_coherent(
@@ -2862,7 +2862,7 @@
 int ipahal_fltrt_allocate_hw_tbl_imgs(
 	struct ipahal_fltrt_alloc_imgs_params *params)
 {
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	/* Input validation */
 	if (!params) {
@@ -2904,7 +2904,7 @@
 {
 	struct ipahal_fltrt_obj *obj;
 
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	if (!tbl_mem) {
 		IPAHAL_ERR("Input err\n");
@@ -2958,7 +2958,7 @@
 	u64 hwaddr;
 	u8 *hdr;
 
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
 
@@ -2991,7 +2991,7 @@
 	u64 hwaddr;
 	u8 *hdr;
 
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
 
@@ -3023,7 +3023,7 @@
 	u8 *tmp = NULL;
 	int rc;
 
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	if (!params || !hw_len) {
 		IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
@@ -3081,7 +3081,7 @@
 	u8 *tmp = NULL;
 	int rc;
 
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	if (!params || !hw_len) {
 		IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
@@ -3138,7 +3138,7 @@
 		const struct ipa_rule_attrib *attrib,
 		struct ipa_ipfltri_rule_eq *eq_atrb)
 {
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	if (ipt >= IPA_IP_MAX) {
 		IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt);
@@ -3165,7 +3165,7 @@
 int ipahal_rt_parse_hw_rule(u8 *rule_addr,
 	struct ipahal_rt_rule_entry *rule)
 {
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	if (!rule_addr || !rule) {
 		IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
@@ -3186,7 +3186,7 @@
 int ipahal_flt_parse_hw_rule(u8 *rule_addr,
 	struct ipahal_flt_rule_entry *rule)
 {
-	IPAHAL_DBG("Entry\n");
+	IPAHAL_DBG_LOW("Entry\n");
 
 	if (!rule_addr || !rule) {
 		IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 56ec538..e653bcd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -34,6 +34,8 @@
 #include <linux/rmnet_ipa_fd_ioctl.h>
 #include <linux/ipa.h>
 #include <uapi/linux/net_map.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <net/rmnet_config.h>
 
 #include "ipa_trace.h"
 
@@ -1242,6 +1244,85 @@
 		IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
 }
 
+static int handle3_ingress_format(struct net_device *dev,
+			struct rmnet_ioctl_extended_s *in)
+{
+	int ret = 0;
+	struct ipa_sys_connect_params *ipa_wan_ep_cfg;
+	struct rmnet_phys_ep_conf_s *ep_cfg;
+
+	IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
+	ipa_wan_ep_cfg = &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg;
+	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+		ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+		   IPA_ENABLE_CS_OFFLOAD_DL;
+
+	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
+		IPAWANERR("get AGG size %d count %d\n",
+				  in->u.ingress_format.agg_size,
+				  in->u.ingress_format.agg_count);
+
+		ret = ipa_disable_apps_wan_cons_deaggr(
+			  in->u.ingress_format.agg_size,
+			  in->u.ingress_format.agg_count);
+
+		if (!ret) {
+			ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
+			   in->u.ingress_format.agg_size;
+			ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
+			   in->u.ingress_format.agg_count;
+
+			if (ipa_wan_ep_cfg->napi_enabled) {
+				ipa_wan_ep_cfg->recycle_enabled = true;
+				ep_cfg = (struct rmnet_phys_ep_conf_s *)
+				   rcu_dereference(dev->rx_handler_data);
+				ep_cfg->recycle = ipa_recycle_wan_skb;
+				pr_info("Wan Recycle Enabled\n");
+			}
+		}
+	}
+
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = true;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
+	ipa_wan_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
+
+	ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_CONS;
+	ipa_wan_ep_cfg->notify = apps_ipa_packet_receive_notify;
+	ipa_wan_ep_cfg->priv = dev;
+
+	ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
+	if (ipa_wan_ep_cfg->napi_enabled)
+		ipa_wan_ep_cfg->desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+	else
+		ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
+	mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+
+	if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+		IPAWANDBG("In SSR sequence/recovery\n");
+		mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+		return -EFAULT;
+	}
+	ret = ipa3_setup_sys_pipe(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
+	   &rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+
+	mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+
+	if (ret)
+		IPAWANERR("failed to configure ingress\n");
+
+	return ret;
+}
+
 /**
  * ipa3_wwan_ioctl() - I/O control for wwan network driver.
  *
@@ -1557,91 +1638,7 @@
 			}
 			break;
 		case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/*  Set IDF  */
-			IPAWANDBG("get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
-			if ((extend_ioctl_data.u.data) &
-					RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
-				rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
-					ipa_ep_cfg.cfg.cs_offload_en =
-					IPA_ENABLE_CS_OFFLOAD_DL;
-
-			if ((extend_ioctl_data.u.data) &
-					RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
-				IPAWANERR("get AGG size %d count %d\n",
-					extend_ioctl_data.u.
-					ingress_format.agg_size,
-					extend_ioctl_data.u.
-					ingress_format.agg_count);
-				if (!ipa_disable_apps_wan_cons_deaggr(
-					extend_ioctl_data.u.
-					ingress_format.agg_size,
-					extend_ioctl_data.
-					u.ingress_format.agg_count)) {
-					rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
-					ipa_ep_cfg.aggr.aggr_byte_limit =
-					extend_ioctl_data.u.ingress_format.
-					agg_size;
-					rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
-					ipa_ep_cfg.aggr.aggr_pkt_limit =
-					extend_ioctl_data.u.ingress_format.
-					agg_count;
-				}
-			}
-
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
-				hdr_len = 4;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
-				hdr_ofst_metadata_valid = 1;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
-				hdr.hdr_ofst_metadata = 1;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
-				hdr_ofst_pkt_size_valid = 1;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
-				hdr_ofst_pkt_size = 2;
-
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_total_len_or_pad_valid = true;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_total_len_or_pad = 0;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_payload_len_inc_padding = true;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_total_len_or_pad_offset = 0;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
-				hdr_little_endian = 0;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
-				metadata_mask.metadata_mask = 0xFF000000;
-
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.client =
-				IPA_CLIENT_APPS_WAN_CONS;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.notify =
-				apps_ipa_packet_receive_notify;
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.priv = dev;
-
-			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled =
-				ipa3_rmnet_res.ipa_napi_enable;
-			if (rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled)
-				rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
-				desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
-			else
-				rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
-				desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
-
-			mutex_lock(
-				&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
-			if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
-				IPAWANDBG("In SSR sequence/recovery\n");
-				mutex_unlock(&rmnet_ipa3_ctx->
-					ipa_to_apps_pipe_handle_guard);
-				rc = -EFAULT;
-				break;
-			}
-			rc = ipa3_setup_sys_pipe(
-				&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
-				&rmnet_ipa3_ctx->ipa3_to_apps_hdl);
-			mutex_unlock(&rmnet_ipa3_ctx->
-				ipa_to_apps_pipe_handle_guard);
-			if (rc)
-				IPAWANERR("failed to configure ingress\n");
+			rc = handle3_ingress_format(dev, &extend_ioctl_data);
 			break;
 		case RMNET_IOCTL_SET_XLAT_DEV_INFO:
 			wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
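
For context on how this path is exercised: the ingress data format is programmed from user space through the rmnet extended ioctl before the data call comes up, and the request now lands in handle3_ingress_format() instead of the open-coded case above. The fragment below is a minimal sketch assuming the usual msm_rmnet convention (RMNET_IOCTL_EXTENDED carrying a struct rmnet_ioctl_extended_s through ifr_ifru.ifru_data); only the flag and field names come from the code above, while the transport and the aggregation size/count values are assumptions and placeholders.

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/msm_rmnet.h>	/* rmnet_ioctl_extended_s, RMNET_IOCTL_* */

	/*
	 * Minimal sketch: request checksum offload plus aggregation on the WAN
	 * ingress pipe. RMNET_IOCTL_EXTENDED and the numeric aggregation values
	 * are assumptions/placeholders, not taken from this patch.
	 */
	static int set_ingress_format(int fd, const char *ifname)
	{
		struct rmnet_ioctl_extended_s ext;
		struct ifreq ifr;

		memset(&ext, 0, sizeof(ext));
		ext.extended_ioctl = RMNET_IOCTL_SET_INGRESS_DATA_FORMAT;
		ext.u.data = RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM |
			     RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA;
		ext.u.ingress_format.agg_size = 8192;	/* placeholder, bytes */
		ext.u.ingress_format.agg_count = 16;	/* placeholder, packets */

		memset(&ifr, 0, sizeof(ifr));
		snprintf(ifr.ifr_name, IFNAMSIZ, "%s", ifname);
		ifr.ifr_ifru.ifru_data = (void *)&ext;

		return ioctl(fd, RMNET_IOCTL_EXTENDED, &ifr);
	}

A caller would typically open an ordinary AF_INET datagram socket and pass the name of the interface registered by this driver; the driver-side dispatch then reaches the RMNET_IOCTL_SET_INGRESS_DATA_FORMAT case shown above.
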
@@ -1973,6 +1970,12 @@
 		"qcom,ipa-advertise-sg-support");
 	pr_info("IPA SG support = %s\n",
 		ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
+
+	ipa_rmnet_drv_res->ipa_napi_enable =
+		of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-napi-enable");
+	pr_info("IPA Napi Enable = %s\n",
+		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
 	return 0;
 }
 
@@ -2468,18 +2471,20 @@
 	req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
 
 	rc = ipa3_qmi_get_data_stats(&req, resp);
+	if (rc) {
+		IPAWANERR("ipa3_qmi_get_data_stats failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
 
-	if (!rc) {
-		memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
-		msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
-		msg_meta.msg_len =
-			sizeof(struct ipa_get_data_stats_resp_msg_v01);
-		rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
-		if (rc) {
-			IPAWANERR("ipa_send_msg failed: %d\n", rc);
-			kfree(resp);
-			return;
-		}
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+	msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
+	rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa_send_msg failed: %d\n", rc);
+		kfree(resp);
+		return;
 	}
 }
 
@@ -2528,18 +2533,20 @@
 	req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id;
 
 	rc = ipa3_qmi_get_network_stats(&req, resp);
+	if (rc) {
+		IPAWANERR("ipa3_qmi_get_network_stats failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
 
-	if (!rc) {
-		memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
-		msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
-		msg_meta.msg_len =
-			sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
-		rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
-		if (rc) {
-			IPAWANERR("ipa_send_msg failed: %d\n", rc);
-			kfree(resp);
-			return;
-		}
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
+	msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
+	rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa_send_msg failed: %d\n", rc);
+		kfree(resp);
+		return;
 	}
 }
 
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 37d18e9..d173b4c 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -551,6 +551,7 @@
 	bool skip_ep_cfg;
 	bool keep_ipa_awake;
 	bool napi_enabled;
+	bool recycle_enabled;
 };
 
 /**
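
The new recycle_enabled field in struct ipa_sys_connect_params is the client-side switch for the WAN skb-recycle path introduced above; in this patch only the WAN ingress setup sets it, and only when both NAPI and the aggregated ingress format are in use. The fragment below is a condensed restatement of handle3_ingress_format() using names from the diff, only to show where the flag plugs in; the helper name and the agg_requested parameter are hypothetical.

	/* Sketch only: condensed from handle3_ingress_format() in rmnet_ipa.c. */
	static int wan_cons_connect_sketch(struct net_device *dev, bool agg_requested)
	{
		struct ipa_sys_connect_params *cfg =
			&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg;

		cfg->client = IPA_CLIENT_APPS_WAN_CONS;
		cfg->notify = apps_ipa_packet_receive_notify;
		cfg->priv = dev;
		cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
		cfg->desc_fifo_sz = cfg->napi_enabled ?
			IPA_WAN_CONS_DESC_FIFO_SZ : IPA_SYS_DESC_FIFO_SZ;

		/* Recycling is only opted into on the NAPI + aggregation path. */
		if (cfg->napi_enabled && agg_requested)
			cfg->recycle_enabled = true;

		return ipa3_setup_sys_pipe(cfg, &rmnet_ipa3_ctx->ipa3_to_apps_hdl);
	}
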