msm: ipa: initial commit of IPA driver

This is a snapshot of the IPA driver from the msm-4.4 kernel, based on
commit ebc2a18351d4 ("msm: ipa: WA to get PA of sgt_tbl from wlan").

CRs-Fixed: 1077422
Change-Id: I97cf9ee9c104ac5ab5bc0577eb9413264b08a7a5
Signed-off-by: Amir Levy <alevy@codeaurora.org>
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
new file mode 100644
index 0000000..a4faaea
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_IPA3) += ipahal/
+
+obj-$(CONFIG_IPA3) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
+	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
+
+obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
new file mode 100644
index 0000000..a2e1366
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -0,0 +1,5412 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/of_gpio.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/msm_gsi.h>
+#include <linux/qcom_iommu.h>
+#include <linux/time.h>
+#include <linux/hashtable.h>
+#include <linux/hash.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/smem.h>
+#define IPA_SUBSYSTEM_NAME "ipa_fws"
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define CREATE_TRACE_POINTS
+#include "ipa_trace.h"
+
+#define IPA_GPIO_IN_QUERY_CLK_IDX 0
+#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
+#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_MOBILE_AP_MODE(x) ((x) == IPA_MODE_MOBILE_AP_ETH || \
+			       (x) == IPA_MODE_MOBILE_AP_WAN || \
+			       (x) == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_A5_MUX_HEADER_LENGTH (8)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define CLEANUP_TAG_PROCESS_TIMEOUT 150
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
+
+#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
+#define IPA_SMEM_SIZE (8 * 1024)
+
+/* round addresses to the closest page, per SMMU requirements */
+#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
+	do { \
+		(iova_p) = rounddown((iova), PAGE_SIZE); \
+		(pa_p) = rounddown((pa), PAGE_SIZE); \
+		(size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
+	} while (0)
+
+
+/* The relative location in /lib/firmware where the FWs will reside */
+#define IPA_FWS_PATH "ipa/ipa_fws.elf"
+
+#ifdef CONFIG_COMPAT
+#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				compat_uptr_t)
+#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				compat_uptr_t)
+#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_DMA, \
+				compat_uptr_t)
+#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				compat_uptr_t)
+#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PULL_MSG, \
+				compat_uptr_t)
+#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_ADD_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_DEL_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GENERATE_FLT_EQ, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+				compat_uptr_t)
+#define IPA_IOC_WRITE_QMAPID32  _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WRITE_QMAPID, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_FLT_RULE, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_RT_RULE, \
+				compat_uptr_t)
+
+/**
+ * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of the table
+ * @size: input parameter, size of the table in bytes
+ * @offset: output parameter, offset into the page in case of system memory
+ */
+struct ipa3_ioc_nat_alloc_mem32 {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	compat_size_t size;
+	compat_off_t offset;
+};
+#endif
+
+static void ipa3_start_tag_process(struct work_struct *work);
+static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
+
+static void ipa3_sps_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_sps_release_resource_work,
+	ipa3_sps_release_resource);
+static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
+
+static void ipa_gsi_request_resource(struct work_struct *work);
+static DECLARE_WORK(ipa_gsi_request_resource_work,
+	ipa_gsi_request_resource);
+
+static void ipa_gsi_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_gsi_release_resource_work,
+	ipa_gsi_release_resource);
+
+static struct ipa3_plat_drv_res ipa3_res = {0, };
+struct msm_bus_scale_pdata *ipa3_bus_scale_table;
+
+static struct clk *ipa3_clk;
+
+struct ipa3_context *ipa3_ctx;
+static struct device *master_dev;
+struct platform_device *ipa3_pdev;
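+/* Cached SMMU configuration flags and IPA register base/size */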
+static struct {
+	bool present;
+	bool arm_smmu;
+	bool disable_htw;
+	bool fast_map;
+	bool s1_bypass;
+	bool use_64_bit_dma_mask;
+	u32 ipa_base;
+	u32 ipa_size;
+} smmu_info;
+
+static char *active_clients_table_buf;
+
+int ipa3_active_clients_log_print_buffer(char *buf, int size)
+{
+	int i;
+	int nbytes;
+	int cnt = 0;
+	int start_idx;
+	int end_idx;
+
+	start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+	end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
+	for (i = start_idx; i != end_idx;
+		i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
+		nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
+				ipa3_ctx->ipa3_active_clients_logging
+				.log_buffer[i]);
+		cnt += nbytes;
+	}
+
+	return cnt;
+}
+
+int ipa3_active_clients_log_print_table(char *buf, int size)
+{
+	int i;
+	struct ipa3_active_client_htable_entry *iterator;
+	int cnt = 0;
+
+	cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
+	hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
+			iterator, list) {
+		switch (iterator->type) {
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d ENDPOINT\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d SIMPLE\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d RESOURCE\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d SPECIAL\n",
+					iterator->id_string, iterator->count);
+			break;
+		default:
+			IPAERR("Trying to print illegal active_clients type\n");
+			break;
+		}
+	}
+	cnt += scnprintf(buf + cnt, size - cnt,
+			"\nTotal active clients count: %d\n",
+			ipa3_ctx->ipa3_active_clients.cnt);
+
+	return cnt;
+}
+
+static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	ipa3_active_clients_lock();
+	ipa3_active_clients_log_print_table(active_clients_table_buf,
+			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
+	IPAERR("%s", active_clients_table_buf);
+	ipa3_active_clients_unlock();
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_active_clients_panic_blk = {
+	.notifier_call  = ipa3_active_clients_panic_notifier,
+};
+
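+/*
+ * Insert a single line into the active clients circular log buffer,
+ * advancing the head index (and the tail when the buffer wraps).
+ */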
+static int ipa3_active_clients_log_insert(const char *string)
+{
+	int head;
+	int tail;
+
+	if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
+		return -EPERM;
+
+	head = ipa3_ctx->ipa3_active_clients_logging.log_head;
+	tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
+
+	memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
+			IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+	strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
+			(size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+	head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+	if (tail == head)
+		tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+
+	ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
+	ipa3_ctx->ipa3_active_clients_logging.log_head = head;
+
+	return 0;
+}
+
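+/*
+ * Allocate the circular log buffer and the panic-time table buffer, and
+ * register a panic notifier that dumps the active clients table.
+ */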
+static int ipa3_active_clients_log_init(void)
+{
+	int i;
+
+	ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
+			sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
+			GFP_KERNEL);
+	active_clients_table_buf = kzalloc(sizeof(
+			char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
+	if (ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] == NULL) {
+		pr_err("Active Clients Logging memory allocation failed\n");
+		goto bail;
+	}
+	for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
+		ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
+			ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
+			(IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
+	}
+	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+	ipa3_ctx->ipa3_active_clients_logging.log_tail =
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&ipa3_active_clients_panic_blk);
+	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;
+
+	return 0;
+
+bail:
+	return -ENOMEM;
+}
+
+void ipa3_active_clients_log_clear(void)
+{
+	ipa3_active_clients_lock();
+	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+	ipa3_ctx->ipa3_active_clients_logging.log_tail =
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	ipa3_active_clients_unlock();
+}
+
+static void ipa3_active_clients_log_destroy(void)
+{
+	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
+	kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
+	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+	ipa3_ctx->ipa3_active_clients_logging.log_tail =
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+}
+
+enum ipa_smmu_cb_type {
+	IPA_SMMU_CB_AP,
+	IPA_SMMU_CB_WLAN,
+	IPA_SMMU_CB_UC,
+	IPA_SMMU_CB_MAX
+
+};
+
+static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
+
+struct iommu_domain *ipa3_get_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_AP].valid)
+		return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+struct iommu_domain *ipa3_get_uc_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_UC].valid)
+		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
+		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+
+struct device *ipa3_get_dma_dev(void)
+{
+	return ipa3_ctx->pdev;
+}
+
+/**
+ * ipa3_get_smmu_ctx()- Return the AP smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
+{
+	return &smmu_cb[IPA_SMMU_CB_AP];
+}
+
+/**
+ * ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void)
+{
+	return &smmu_cb[IPA_SMMU_CB_WLAN];
+}
+
+/**
+ * ipa3_get_uc_smmu_ctx()- Return the uc smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void)
+{
+	return &smmu_cb[IPA_SMMU_CB_UC];
+}
+
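+/* Char device open: stash the IPA context for subsequent ioctl calls */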
+static int ipa3_open(struct inode *inode, struct file *filp)
+{
+	struct ipa3_context *ctx = NULL;
+
+	IPADBG_LOW("ENTER\n");
+	ctx = container_of(inode->i_cdev, struct ipa3_context, cdev);
+	filp->private_data = ctx;
+
+	return 0;
+}
+
+/**
+* ipa3_flow_control() - Enable/Disable flow control on a particular client.
+* @ipa_client: the IPA client whose pipe is flow controlled (only
+* IPA_CLIENT_USB_PROD is applicable)
+* @enable: true to enable the flow (clear the EP delay), false to disable it
+* @qmap_id: QMAP ID of the flow; must match the EP's configured QMAP ID
+*
+* Return codes:
+* None
+*/
+void ipa3_flow_control(enum ipa_client_type ipa_client,
+		bool enable, uint32_t qmap_id)
+{
+	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+	int ep_idx;
+	struct ipa3_ep_context *ep;
+
+	/* Check if tethered flow control is needed or not. */
+	if (!ipa3_ctx->tethered_flow_control) {
+		IPADBG("Apps flow control is not needed\n");
+		return;
+	}
+
+	/* Check if ep is valid. */
+	ep_idx = ipa3_get_ep_mapping(ipa_client);
+	if (ep_idx == -1) {
+		IPADBG("Invalid IPA client\n");
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[ep_idx];
+	if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
+		IPADBG("EP not valid/Not applicable for client.\n");
+		return;
+	}
+
+	spin_lock(&ipa3_ctx->disconnect_lock);
+	/* Check if the QMAP_ID matches. */
+	if (ep->cfg.meta.qmap_id != qmap_id) {
+		IPADBG("Flow control ind not for same flow: %u %u\n",
+			ep->cfg.meta.qmap_id, qmap_id);
+		spin_unlock(&ipa3_ctx->disconnect_lock);
+		return;
+	}
+	if (!ep->disconnect_in_progress) {
+		if (enable) {
+			IPADBG("Enabling Flow\n");
+			ep_ctrl.ipa_ep_delay = false;
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_enable);
+		} else {
+			IPADBG("Disabling Flow\n");
+			ep_ctrl.ipa_ep_delay = true;
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_disable);
+		}
+		ep_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(ep_idx, &ep_ctrl);
+	} else {
+		IPADBG("EP disconnect is in progress\n");
+	}
+	spin_unlock(&ipa3_ctx->disconnect_lock);
+}
+
+static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAERR("Null buffer\n");
+		return;
+	}
+
+	if (type != WAN_UPSTREAM_ROUTE_ADD &&
+	    type != WAN_UPSTREAM_ROUTE_DEL &&
+	    type != WAN_EMBMS_CONNECT) {
+		IPAERR("Wrong type given. buff %p type %d\n", buff, type);
+		return;
+	}
+
+	kfree(buff);
+}
+
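+/*
+ * Copy a WAN message from user space and distribute it via ipa3_send_msg();
+ * the buffer is released by ipa3_wan_msg_free_cb once it has been consumed.
+ */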
+static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
+{
+	int retval;
+	struct ipa_wan_msg *wan_msg;
+	struct ipa_msg_meta msg_meta;
+
+	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
+	if (!wan_msg) {
+		IPAERR("no memory\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
+		sizeof(struct ipa_wan_msg))) {
+		kfree(wan_msg);
+		return -EFAULT;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = msg_type;
+	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
+	retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
+	if (retval) {
+		IPAERR("ipa3_send_msg failed: %d\n", retval);
+		kfree(wan_msg);
+		return retval;
+	}
+
+	return 0;
+}
+
+
+static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	u8 header[128] = { 0 };
+	u8 *param = NULL;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+	struct ipa_ioc_v4_nat_init nat_init;
+	struct ipa_ioc_v4_nat_del nat_del;
+	struct ipa_ioc_rm_dependency rm_depend;
+	size_t sz;
+
+	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if (!ipa3_is_ready()) {
+		IPAERR("IPA not ready, waiting for init completion\n");
+		wait_for_completion(&ipa3_ctx->init_completion_obj);
+	}
+
+	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+		return -ENOTTY;
+	if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+		return -ENOTTY;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	switch (cmd) {
+	case IPA_IOC_ALLOC_NAT_MEM:
+		if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		/* null terminate the string */
+		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+		if (ipa3_allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_V4_INIT_NAT:
+		if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_init))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_nat_init_cmd(&nat_init)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_NAT_DMA:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_dma_cmd))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		pyld_sz =
+		   sizeof(struct ipa_ioc_nat_dma_cmd) +
+		   ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
+		   sizeof(struct ipa_ioc_nat_dma_one);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_V4_DEL_NAT:
+		if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_del))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_nat_del_cmd(&nat_del)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr) +
+		   ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
+		   sizeof(struct ipa_hdr_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr) +
+		   ((struct ipa_ioc_del_hdr *)header)->num_hdls *
+		   sizeof(struct ipa_hdr_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule) +
+		   ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
+		   sizeof(struct ipa_rt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_ADD_RT_RULE_AFTER:
+		if (copy_from_user(header, (u8 *)arg,
+			sizeof(struct ipa_ioc_add_rt_rule_after))) {
+
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule_after) +
+		   ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules *
+		   sizeof(struct ipa_rt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_rt_rule_after(
+			(struct ipa_ioc_add_rt_rule_after *)param)) {
+
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_MDFY_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_mdfy_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_mdfy_rt_rule) +
+		   ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules *
+		   sizeof(struct ipa_rt_rule_mdfy);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_rt_rule) +
+		   ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
+		   sizeof(struct ipa_rt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule) +
+		   ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
+		   sizeof(struct ipa_flt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE_AFTER:
+		if (copy_from_user(header, (u8 *)arg,
+				sizeof(struct ipa_ioc_add_flt_rule_after))) {
+
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule_after) +
+		   ((struct ipa_ioc_add_flt_rule_after *)header)->num_rules *
+		   sizeof(struct ipa_flt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_flt_rule_after(
+				(struct ipa_ioc_add_flt_rule_after *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_flt_rule) +
+		   ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
+		   sizeof(struct ipa_flt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_MDFY_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_mdfy_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_mdfy_flt_rule) +
+		   ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules *
+		   sizeof(struct ipa_flt_rule_mdfy);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_COMMIT_HDR:
+		retval = ipa3_commit_hdr();
+		break;
+	case IPA_IOC_RESET_HDR:
+		retval = ipa3_reset_hdr();
+		break;
+	case IPA_IOC_COMMIT_RT:
+		retval = ipa3_commit_rt(arg);
+		break;
+	case IPA_IOC_RESET_RT:
+		retval = ipa3_reset_rt(arg);
+		break;
+	case IPA_IOC_COMMIT_FLT:
+		retval = ipa3_commit_flt(arg);
+		break;
+	case IPA_IOC_RESET_FLT:
+		retval = ipa3_reset_flt(arg);
+		break;
+	case IPA_IOC_GET_RT_TBL:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_RT_TBL:
+		retval = ipa3_put_rt_tbl(arg);
+		break;
+	case IPA_IOC_GET_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_HDR:
+		retval = ipa3_put_hdr(arg);
+		break;
+	case IPA_IOC_SET_FLT:
+		retval = ipa3_cfg_filter(arg);
+		break;
+	case IPA_IOC_COPY_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_query_intf))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_query_intf))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_TX_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_tx_props);
+		if (copy_from_user(header, (u8 *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
+				> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+
+		pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
+				header)->num_tx_props *
+			sizeof(struct ipa_ioc_tx_intf_prop);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf_tx_props(
+				(struct ipa_ioc_query_intf_tx_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_RX_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_rx_props);
+		if (copy_from_user(header, (u8 *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
+				> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+
+		pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
+				header)->num_rx_props *
+			sizeof(struct ipa_ioc_rx_intf_prop);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf_rx_props(
+				(struct ipa_ioc_query_intf_rx_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_EXT_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_ext_props);
+		if (copy_from_user(header, (u8 *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_ext_props *)
+				header)->num_ext_props > IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+
+		pyld_sz = sz + ((struct ipa_ioc_query_intf_ext_props *)
+				header)->num_ext_props *
+			sizeof(struct ipa_ioc_ext_intf_prop);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf_ext_props(
+				(struct ipa_ioc_query_intf_ext_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PULL_MSG:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_msg_meta))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz = sizeof(struct ipa_msg_meta) +
+		   ((struct ipa_msg_meta *)header)->msg_len;
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_pull_msg((struct ipa_msg_meta *)param,
+				 (char *)param + sizeof(struct ipa_msg_meta),
+				 ((struct ipa_msg_meta *)param)->msg_len) !=
+		       ((struct ipa_msg_meta *)param)->msg_len) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_RM_ADD_DEPENDENCY:
+		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+				sizeof(struct ipa_ioc_rm_dependency))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa_rm_add_dependency_from_ioctl(
+			rm_depend.resource_name, rm_depend.depends_on_name);
+		break;
+	case IPA_IOC_RM_DEL_DEPENDENCY:
+		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+				sizeof(struct ipa_ioc_rm_dependency))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa_rm_delete_dependency_from_ioctl(
+			rm_depend.resource_name, rm_depend.depends_on_name);
+		break;
+	case IPA_IOC_GENERATE_FLT_EQ:
+		{
+			struct ipa_ioc_generate_flt_eq flt_eq;
+
+			if (copy_from_user(&flt_eq, (u8 *)arg,
+				sizeof(struct ipa_ioc_generate_flt_eq))) {
+				retval = -EFAULT;
+				break;
+			}
+			if (ipahal_flt_generate_equation(flt_eq.ip,
+				&flt_eq.attrib, &flt_eq.eq_attrib)) {
+				retval = -EFAULT;
+				break;
+			}
+			if (copy_to_user((u8 *)arg, &flt_eq,
+				sizeof(struct ipa_ioc_generate_flt_eq))) {
+				retval = -EFAULT;
+				break;
+			}
+			break;
+		}
+	case IPA_IOC_QUERY_EP_MAPPING:
+		{
+			retval = ipa3_get_ep_mapping(arg);
+			break;
+		}
+	case IPA_IOC_QUERY_RT_TBL_INDEX:
+		if (copy_from_user(header, (u8 *)arg,
+				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_rt_index(
+			 (struct ipa_ioc_get_rt_tbl_indx *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_WRITE_QMAPID:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_write_qmapid))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_write_qmapid))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
+		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD);
+		if (retval) {
+			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
+		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL);
+		if (retval) {
+			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
+		retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT);
+		if (retval) {
+			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_ADD_HDR_PROC_CTX:
+		if (copy_from_user(header, (u8 *)arg,
+			sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
+		   ((struct ipa_ioc_add_hdr_proc_ctx *)header)->num_proc_ctxs *
+		   sizeof(struct ipa_hdr_proc_ctx_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_hdr_proc_ctx(
+			(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_DEL_HDR_PROC_CTX:
+		if (copy_from_user(header, (u8 *)arg,
+			sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
+		   ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls *
+		   sizeof(struct ipa_hdr_proc_ctx_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_hdr_proc_ctx(
+			(struct ipa_ioc_del_hdr_proc_ctx *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_GET_HW_VERSION:
+		pyld_sz = sizeof(enum ipa_hw_type);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	default:        /* redundant; cmd was checked against IPA_IOCTL_MAX */
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -ENOTTY;
+	}
+	kfree(param);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return retval;
+}
+
+/**
+* ipa3_setup_dflt_rt_tables() - Setup default routing tables
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+int ipa3_setup_dflt_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+
+	rt_rule =
+	   kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule) {
+		IPAERR("fail to alloc mem\n");
+		return -ENOMEM;
+	}
+	/* setup a default v4 route to point to Apps */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
+	rt_rule_entry->rule.retain_hdr = 1;
+
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v4 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to Apps */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v6 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/*
+	 * because these tables are the very first to be added, they will both
+	 * have the same index (0) which is essential for programming the
+	 * "route" end-point config
+	 */
+
+	kfree(rt_rule);
+
+	return 0;
+}
+
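+/*
+ * Install the LAN RX exception header and program the route register so
+ * that exception packets are steered to the Apps LAN consumer pipe.
+ */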
+static int ipa3_setup_exception_path(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	struct ipahal_reg_route route = { 0 };
+	int ret;
+
+	/* install the basic exception header */
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr) {
+		IPAERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+	hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+
+	if (ipa3_add_hdr(hdr)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+	/* set the route register to pass exception packets to Apps */
+	route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	route.route_frag_def_pipe = ipa3_get_ep_mapping(
+		IPA_CLIENT_APPS_LAN_CONS);
+	route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
+	route.route_def_retain_hdr = 1;
+
+	if (ipa3_cfg_route(&route)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
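+/*
+ * Zero-initialize a region of IPA shared memory by DMA-ing a zeroed
+ * system-memory buffer into it with a DMA_SHARED_MEM immediate command.
+ */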
+static int ipa3_init_smem_region(int memory_region_size,
+				int memory_region_offset)
+{
+	struct ipahal_imm_cmd_dma_shared_mem cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa3_desc desc;
+	struct ipa_mem_buffer mem;
+	int rc;
+
+	if (memory_region_size == 0)
+		return 0;
+
+	memset(&desc, 0, sizeof(desc));
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&mem, 0, sizeof(mem));
+
+	mem.size = memory_region_size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
+		&mem.phys_base, GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+
+	memset(mem.base, 0, mem.size);
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		memory_region_offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		return -ENOMEM;
+	}
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	rc = ipa3_send_cmd(1, &desc);
+	if (rc) {
+		IPAERR("failed to send immediate command (error %d)\n", rc);
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+		mem.phys_base);
+
+	return rc;
+}
+
+/**
+* ipa3_init_q6_smem() - Initialize Q6 general memory and
+*                      header memory regions in IPA.
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate dma memory
+* -EFAULT: failed to send IPA command to initialize the memory
+*/
+int ipa3_init_q6_smem(void)
+{
+	int rc;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
+		IPA_MEM_PART(modem_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
+		IPA_MEM_PART(modem_hdr_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem HDRs RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem proc ctx RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
+		IPA_MEM_PART(modem_comp_decomp_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return rc;
+}
+
+static void ipa3_destroy_imm(void *user1, int user2)
+{
+	ipahal_destroy_imm_cmd(user1);
+}
+
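+/* Set or clear the endpoint delay bit on all Q6 producer pipes */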
+static void ipa3_q6_pipe_delay(bool delay)
+{
+	int client_idx;
+	int ep_idx;
+	struct ipa_ep_cfg_ctrl ep_ctrl;
+
+	memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_ctrl.ipa_ep_delay = delay;
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+			ep_idx = ipa3_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+
+			ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+				ep_idx, &ep_ctrl);
+		}
+	}
+}
+
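+/*
+ * Suspend all Q6 consumer pipes and enable head-of-line blocking with a
+ * zero timer so stale descriptors are dropped rather than stalling IPA.
+ */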
+static void ipa3_q6_avoid_holb(void)
+{
+	int ep_idx;
+	int client_idx;
+	struct ipa_ep_cfg_ctrl ep_suspend;
+	struct ipa_ep_cfg_holb ep_holb;
+
+	memset(&ep_suspend, 0, sizeof(ep_suspend));
+	memset(&ep_holb, 0, sizeof(ep_holb));
+
+	ep_suspend.ipa_ep_suspend = true;
+	ep_holb.tmr_val = 0;
+	ep_holb.en = 1;
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
+			ep_idx = ipa3_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+
+			/*
+			 * ipa3_cfg_ep_holb is not used here because we are
+			 * setting HOLB on Q6 pipes, which are not valid from
+			 * the APPS perspective, so that function would fail.
+			 */
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+				ep_idx, &ep_holb);
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+				ep_idx, &ep_holb);
+
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_CTRL_n,
+				ep_idx, &ep_suspend);
+		}
+	}
+}
+
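+/*
+ * Overwrite the Q6-owned filter tables of the given IP version and rule
+ * type with an empty table image, for every filtering pipe that the AP
+ * did not configure.
+ */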
+static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt)
+{
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
+	int retval = 0;
+	int pipe_idx;
+	int flt_idx = 0;
+	int num_cmds = 0;
+	int index;
+	u32 lcl_addr_mem_part;
+	u32 lcl_hdr_sz;
+	struct ipa_mem_buffer mem;
+
+	IPADBG("Entry\n");
+
+	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
+		return -EINVAL;
+	}
+
+	/* There is at most one filtering table per filtering pipe */
+	desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
+		GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
+		sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
+	if (!cmd_pyld) {
+		IPAERR("failed to allocate memory\n");
+		retval = -ENOMEM;
+		goto free_desc;
+	}
+
+	if (ip == IPA_IP_v4) {
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+		}
+	} else {
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+		}
+	}
+
+	retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
+		0, &mem);
+	if (retval) {
+		IPAERR("failed to generate flt single tbl empty img\n");
+		goto free_cmd_pyld;
+	}
+
+	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
+		if (!ipa_is_ep_support_flt(pipe_idx))
+			continue;
+
+		/*
+		 * Clear only the tables of filtering pipes that the AP did
+		 * not configure: pipes that are invalid from the AP's
+		 * perspective, or connected with EP configuration skipped.
+		 */
+		if (!ipa3_ctx->ep[pipe_idx].valid ||
+		    ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
+
+			cmd.is_read = false;
+			cmd.skip_pipeline_clear = false;
+			cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			cmd.size = mem.size;
+			cmd.system_addr = mem.phys_base;
+			cmd.local_addr =
+				ipa3_ctx->smem_restricted_bytes +
+				lcl_addr_mem_part +
+				ipahal_get_hw_tbl_hdr_width() +
+				flt_idx * ipahal_get_hw_tbl_hdr_width();
+			cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+			if (!cmd_pyld[num_cmds]) {
+				IPAERR("fail construct dma_shared_mem cmd\n");
+				retval = -ENOMEM;
+				goto free_empty_img;
+			}
+			desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_DMA_SHARED_MEM);
+			desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+			desc[num_cmds].len = cmd_pyld[num_cmds]->len;
+			desc[num_cmds].type = IPA_IMM_CMD_DESC;
+			num_cmds++;
+		}
+
+		flt_idx++;
+	}
+
+	IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
+	retval = ipa3_send_cmd(num_cmds, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (err %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+free_empty_img:
+	ipahal_free_dma_mem(&mem);
+free_cmd_pyld:
+	for (index = 0; index < num_cmds; index++)
+		ipahal_destroy_imm_cmd(cmd_pyld[index]);
+	kfree(cmd_pyld);
+free_desc:
+	kfree(desc);
+	return retval;
+}
+
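+/*
+ * Overwrite the modem-owned routing table entries (modem_rt_index_lo..hi)
+ * of the given IP version and rule type with an empty table image.
+ */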
+static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt)
+{
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	int retval = 0;
+	u32 modem_rt_index_lo;
+	u32 modem_rt_index_hi;
+	u32 lcl_addr_mem_part;
+	u32 lcl_hdr_sz;
+	struct ipa_mem_buffer mem;
+
+	IPADBG("Entry\n");
+
+	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
+		return -EINVAL;
+	}
+
+	if (ip == IPA_IP_v4) {
+		modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
+		modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
+			lcl_hdr_sz =  IPA_MEM_PART(v4_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+		}
+	} else {
+		modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
+		modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
+			lcl_hdr_sz =  IPA_MEM_PART(v6_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+		}
+	}
+
+	retval = ipahal_rt_generate_empty_img(
+		modem_rt_index_hi - modem_rt_index_lo + 1,
+		lcl_hdr_sz, lcl_hdr_sz, &mem);
+	if (retval) {
+		IPAERR("fail generate empty rt img\n");
+		return -ENOMEM;
+	}
+
+	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		retval = -ENOMEM;
+		goto free_empty_img;
+	}
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr =  mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		lcl_addr_mem_part +
+		modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
+	cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		retval = -ENOMEM;
+		goto free_desc;
+	}
+	desc->opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc->pyld = cmd_pyld->data;
+	desc->len = cmd_pyld->len;
+	desc->type = IPA_IMM_CMD_DESC;
+
+	IPADBG("Sending 1 descriptor for rt tbl clearing\n");
+	retval = ipa3_send_cmd(1, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (err %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_desc:
+	kfree(desc);
+free_empty_img:
+	ipahal_free_dma_mem(&mem);
+	return retval;
+}
+
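+/*
+ * Clean all Q6 filter and routing tables (v4/v6, hashable/non-hashable)
+ * and then flush the filter/route hash caches with a REGISTER_WRITE
+ * immediate command.
+ */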
+static int ipa3_q6_clean_q6_tables(void)
+{
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	int retval;
+	struct ipahal_reg_fltrt_hash_flush flush;
+	struct ipahal_reg_valmask valmask;
+
+	IPADBG("Entry\n");
+
+
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
+		return -EFAULT;
+	}
+
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
+		return -EFAULT;
+	}
+
+	/* Flush rules cache */
+	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	flush.v4_flt = true;
+	flush.v4_rt = true;
+	flush.v6_flt = true;
+	flush.v6_rt = true;
+	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&reg_write_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct register_write imm cmd\n");
+		retval = -EFAULT;
+		goto bail_desc;
+	}
+	desc->opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc->pyld = cmd_pyld->data;
+	desc->len = cmd_pyld->len;
+	desc->type = IPA_IMM_CMD_DESC;
+
+	IPADBG("Sending 1 descriptor for tbls flush\n");
+	retval = ipa3_send_cmd(1, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (err %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+bail_desc:
+	kfree(desc);
+	IPADBG("Done - retval = %d\n", retval);
+	return retval;
+}
+
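+/*
+ * For every pipe owned by Q6 (valid but not configured by the AP), set the
+ * status endpoint to the Apps LAN consumer and run the TAG process so
+ * in-flight packets are flushed before the modem shuts down.
+ */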
+static int ipa3_q6_set_ex_path_to_apps(void)
+{
+	int ep_idx;
+	int client_idx;
+	struct ipa3_desc *desc;
+	int num_descs = 0;
+	int index;
+	struct ipahal_imm_cmd_register_write reg_write;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int retval;
+	struct ipahal_reg_valmask valmask;
+
+	desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
+			GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Set the exception path to AP */
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		ep_idx = ipa3_get_ep_mapping(client_idx);
+		if (ep_idx == -1)
+			continue;
+
+		if (ipa3_ctx->ep[ep_idx].valid &&
+			ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
+			BUG_ON(num_descs >= ipa3_ctx->ipa_num_pipes);
+
+			reg_write.skip_pipeline_clear = false;
+			reg_write.pipeline_clear_options =
+				IPAHAL_HPS_CLEAR;
+			reg_write.offset =
+				ipahal_get_reg_ofst(IPA_ENDP_STATUS_n);
+			ipahal_get_status_ep_valmask(
+				ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
+				&valmask);
+			reg_write.value = valmask.val;
+			reg_write.value_mask = valmask.mask;
+			cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+			if (!cmd_pyld) {
+				IPAERR("fail construct register_write cmd\n");
+				BUG();
+			}
+
+			desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_REGISTER_WRITE);
+			desc[num_descs].type = IPA_IMM_CMD_DESC;
+			desc[num_descs].callback = ipa3_destroy_imm;
+			desc[num_descs].user1 = cmd_pyld;
+			desc[num_descs].pyld = cmd_pyld->data;
+			desc[num_descs].len = cmd_pyld->len;
+			num_descs++;
+		}
+	}
+
+	/* Will wait 150msecs for IPA tag process completion */
+	retval = ipa3_tag_process(desc, num_descs,
+		msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
+	if (retval) {
+		IPAERR("TAG process failed! (error %d)\n", retval);
+		/* For timeout error ipa3_destroy_imm cb will destroy user1 */
+		if (retval != -ETIME) {
+			for (index = 0; index < num_descs; index++)
+				if (desc[index].callback)
+					desc[index].callback(desc[index].user1,
+						desc[index].user2);
+			retval = -EINVAL;
+		}
+	}
+
+	kfree(desc);
+
+	return retval;
+}
+
+/**
+* ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
+*                    in IPA HW. This is performed in case of SSR.
+*
+* This is a mandatory procedure; if one of the steps fails, the
+* AP needs to restart.
+*/
+void ipa3_q6_pre_shutdown_cleanup(void)
+{
+	IPADBG_LOW("ENTER\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipa3_q6_pipe_delay(true);
+	ipa3_q6_avoid_holb();
+	if (ipa3_q6_clean_q6_tables()) {
+		IPAERR("Failed to clean Q6 tables\n");
+		BUG();
+	}
+	if (ipa3_q6_set_ex_path_to_apps()) {
+		IPAERR("Failed to redirect exceptions to APPS\n");
+		BUG();
+	}
+	/*
+	 * Remove delay from Q6 PRODs to avoid pending descriptors
+	 * during the pipe reset procedure
+	 */
+	ipa3_q6_pipe_delay(false);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG_LOW("Exit with success\n");
+}
+
+/*
+ * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup,
+ * check that the GSI channels related to the Q6 producer clients are empty.
+ *
+ * Q6 GSI channel emptiness is needed to guarantee that no descriptors with
+ * invalid info are injected into IPA RX from IPA_IF while the modem is
+ * restarting.
+ */
+void ipa3_q6_post_shutdown_cleanup(void)
+{
+	int client_idx;
+
+	IPADBG_LOW("ENTER\n");
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (!ipa3_ctx->uc_ctx.uc_loaded) {
+		IPAERR("uC is not loaded. Skipping\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+			if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
+				IPAERR("fail to validate Q6 ch emptiness %d\n",
+					client_idx);
+				BUG();
+				return;
+			}
+		}
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG_LOW("Exit with success\n");
+}
+
+static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
+{
+	/* Set 4 bytes of CANARY before the offset */
+	sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
+}
+
+/**
+ * _ipa_init_sram_v3_0() - Initialize IPA local SRAM.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_sram_v3_0(void)
+{
+	u32 *ipa_sram_mmio;
+	unsigned long phys_addr;
+
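+	/*
+	 * Local SRAM is mapped through the IPA_SRAM_DIRECT_ACCESS_n register
+	 * window, starting after the SMEM restricted bytes
+	 */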
+	phys_addr = ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+			ipa3_ctx->smem_restricted_bytes / 4);
+
+	ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	/* Consult with ipa_i.h on the location of the CANARY values */
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst));
+
+	iounmap(ipa_sram_mmio);
+
+	return 0;
+}
+
+/**
+ * _ipa_init_hdr_v3_0() - Initialize IPA header block.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_hdr_v3_0(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_hdr_init_local cmd = {0};
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
+
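+	/*
+	 * Zero-initialize the modem and AP header regions in local SRAM by
+	 * copying a zeroed DMA buffer via the hdr_init_local immediate command
+	 */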
+	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	memset(mem.base, 0, mem.size);
+
+	cmd.hdr_table_addr = mem.phys_base;
+	cmd.size_hdr_table = mem.size;
+	cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(modem_hdr_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail to construct hdr_init_local imm cmd\n");
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_HDR_INIT_LOCAL);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		ipahal_destroy_imm_cmd(cmd_pyld);
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
+		IPA_MEM_PART(apps_hdr_proc_ctx_size);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	memset(mem.base, 0, mem.size);
+	memset(&desc, 0, sizeof(desc));
+
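+	/*
+	 * Zero the modem header proc-ctx region in SRAM with a dma_shared_mem
+	 * copy from the zeroed system memory buffer
+	 */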
+	dma_cmd.is_read = false;
+	dma_cmd.skip_pipeline_clear = false;
+	dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	dma_cmd.system_addr = mem.phys_base;
+	dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
+	dma_cmd.size = mem.size;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail to construct dma_shared_mem imm\n");
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		ipahal_destroy_imm_cmd(cmd_pyld);
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size,
+			mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+	ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
+
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	return 0;
+}
+
+/**
+ * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_rt4_v3(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int i;
+	int rc = 0;
+
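+	/*
+	 * Mark the modem-owned routing table indices as used so the AP will
+	 * not allocate them
+	 */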
+	for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
+		i <= IPA_MEM_PART(v4_modem_rt_index_hi);
+		i++)
+		ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
+	IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
+
+	rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
+		IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
+		&mem);
+	if (rc) {
+		IPAERR("fail generate empty v4 rt img\n");
+		return rc;
+	}
+
+	v4_cmd.hash_rules_addr = mem.phys_base;
+	v4_cmd.hash_rules_size = mem.size;
+	v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_rt_hash_ofst);
+	v4_cmd.nhash_rules_addr = mem.phys_base;
+	v4_cmd.nhash_rules_size = mem.size;
+	v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_rt_nhash_ofst);
+	IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
+				v4_cmd.hash_local_addr);
+	IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
+				v4_cmd.nhash_local_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v4_rt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	desc.opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_ROUTING_INIT);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+/**
+ * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_rt6_v3(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int i;
+	int rc = 0;
+
+	for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
+		i <= IPA_MEM_PART(v6_modem_rt_index_hi);
+		i++)
+		ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
+	IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
+
+	rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
+		IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
+		&mem);
+	if (rc) {
+		IPAERR("fail generate empty v6 rt img\n");
+		return rc;
+	}
+
+	v6_cmd.hash_rules_addr = mem.phys_base;
+	v6_cmd.hash_rules_size = mem.size;
+	v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_rt_hash_ofst);
+	v6_cmd.nhash_rules_addr = mem.phys_base;
+	v6_cmd.nhash_rules_size = mem.size;
+	v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_rt_nhash_ofst);
+	IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
+				v6_cmd.hash_local_addr);
+	IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
+				v6_cmd.nhash_local_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v6_rt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	desc.opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_ROUTING_INIT);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+/**
+ * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_flt4_v3(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int rc;
+
+	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+		IPA_MEM_PART(v4_flt_hash_size),
+		IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+		&mem);
+	if (rc) {
+		IPAERR("fail generate empty v4 flt img\n");
+		return rc;
+	}
+
+	v4_cmd.hash_rules_addr = mem.phys_base;
+	v4_cmd.hash_rules_size = mem.size;
+	v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_flt_hash_ofst);
+	v4_cmd.nhash_rules_addr = mem.phys_base;
+	v4_cmd.nhash_rules_size = mem.size;
+	v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_flt_nhash_ofst);
+	IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
+				v4_cmd.hash_local_addr);
+	IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
+				v4_cmd.nhash_local_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v4_flt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_FILTER_INIT);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+/**
+ * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_flt6_v3(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int rc;
+
+	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+		IPA_MEM_PART(v6_flt_hash_size),
+		IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+		&mem);
+	if (rc) {
+		IPAERR("fail generate empty v6 flt img\n");
+		return rc;
+	}
+
+	v6_cmd.hash_rules_addr = mem.phys_base;
+	v6_cmd.hash_rules_size = mem.size;
+	v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_flt_hash_ofst);
+	v6_cmd.nhash_rules_addr = mem.phys_base;
+	v6_cmd.nhash_rules_size = mem.size;
+	v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_flt_nhash_ofst);
+	IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
+				v6_cmd.hash_local_addr);
+	IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
+				v6_cmd.nhash_local_addr);
+
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v6_flt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_FILTER_INIT);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+static int ipa3_setup_flt_hash_tuple(void)
+{
+	int pipe_idx;
+	struct ipahal_reg_hash_tuple tuple;
+
+	memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
+
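+	/*
+	 * A zeroed tuple disables hashing on every AP-owned filtering pipe;
+	 * modem pipes are skipped and left for Q6 to configure
+	 */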
+	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
+		if (!ipa_is_ep_support_flt(pipe_idx))
+			continue;
+
+		if (ipa_is_modem_pipe(pipe_idx))
+			continue;
+
+		if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
+			IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int ipa3_setup_rt_hash_tuple(void)
+{
+	int tbl_idx;
+	struct ipahal_reg_hash_tuple tuple;
+
+	memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
+
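+	/*
+	 * Likewise for routing: clear the hash tuple on every AP routing
+	 * table, skipping the index ranges owned by the modem
+	 */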
+	for (tbl_idx = 0;
+		tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
+		IPA_MEM_PART(v4_rt_num_index));
+		tbl_idx++) {
+
+		if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
+			tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
+			continue;
+
+		if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
+			tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
+			continue;
+
+		if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
+			IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int ipa3_setup_apps_pipes(void)
+{
+	struct ipa_sys_connect_params sys_in;
+	int result = 0;
+
+	if (ipa3_ctx->gsi_ch20_wa) {
+		IPADBG("Allocating GSI physical channel 20\n");
+		result = ipa_gsi_ch20_wa();
+		if (result) {
+			IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
+			goto fail_cmd;
+		}
+	}
+
+	/* CMD OUT (AP->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
+	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_cmd;
+	}
+	IPADBG("Apps to IPA cmd pipe is connected\n");
+
+	ipa3_ctx->ctrl->ipa_init_sram();
+	IPADBG("SRAM initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_hdr();
+	IPADBG("HDR initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_rt4();
+	IPADBG("V4 RT initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_rt6();
+	IPADBG("V6 RT initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_flt4();
+	IPADBG("V4 FLT initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_flt6();
+	IPADBG("V6 FLT initialized\n");
+
+	if (ipa3_setup_flt_hash_tuple()) {
+		IPAERR(":fail to configure flt hash tuple\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("flt hash tuple is configured\n");
+
+	if (ipa3_setup_rt_hash_tuple()) {
+		IPAERR(":fail to configure rt hash tuple\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("rt hash tuple is configured\n");
+
+	if (ipa3_setup_exception_path()) {
+		IPAERR(":fail to setup excp path\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("Exception path was successfully set\n");
+
+	if (ipa3_setup_dflt_rt_tables()) {
+		IPAERR(":fail to setup dflt routes\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("default routing was set\n");
+
+	/* LAN IN (IPA->A5) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.notify = ipa3_lan_rx_cb;
+	sys_in.priv = NULL;
+	sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+	sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
+
+	/*
+	 * ipa3_lan_rx_cb() is intended to notify the source EP about a packet
+	 * being received on LAN_CONS by calling the source EP callback.
+	 * There is a potential race when calling this callback: another
+	 * thread may nullify it, e.g. on EP disconnect.
+	 * This lock protects access to the source EP callback.
+	 */
+	spin_lock_init(&ipa3_ctx->disconnect_lock);
+	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+
+	/* LAN-WAN OUT (AP->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_out)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_data_out;
+	}
+
+	return 0;
+
+fail_data_out:
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+fail_schedule_delayed_work:
+	if (ipa3_ctx->dflt_v6_rt_rule_hdl)
+		__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
+	if (ipa3_ctx->dflt_v4_rt_rule_hdl)
+		__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
+	if (ipa3_ctx->excp_hdr_hdl)
+		__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl);
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+fail_cmd:
+	return result;
+}
+
+static void ipa3_teardown_apps_pipes(void)
+{
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
+	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
+	__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl);
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+}
+
+#ifdef CONFIG_COMPAT
+long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+
+	switch (cmd) {
+	case IPA_IOC_ADD_HDR32:
+		cmd = IPA_IOC_ADD_HDR;
+		break;
+	case IPA_IOC_DEL_HDR32:
+		cmd = IPA_IOC_DEL_HDR;
+		break;
+	case IPA_IOC_ADD_RT_RULE32:
+		cmd = IPA_IOC_ADD_RT_RULE;
+		break;
+	case IPA_IOC_DEL_RT_RULE32:
+		cmd = IPA_IOC_DEL_RT_RULE;
+		break;
+	case IPA_IOC_ADD_FLT_RULE32:
+		cmd = IPA_IOC_ADD_FLT_RULE;
+		break;
+	case IPA_IOC_DEL_FLT_RULE32:
+		cmd = IPA_IOC_DEL_FLT_RULE;
+		break;
+	case IPA_IOC_GET_RT_TBL32:
+		cmd = IPA_IOC_GET_RT_TBL;
+		break;
+	case IPA_IOC_COPY_HDR32:
+		cmd = IPA_IOC_COPY_HDR;
+		break;
+	case IPA_IOC_QUERY_INTF32:
+		cmd = IPA_IOC_QUERY_INTF;
+		break;
+	case IPA_IOC_QUERY_INTF_TX_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
+		break;
+	case IPA_IOC_QUERY_INTF_RX_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
+		break;
+	case IPA_IOC_QUERY_INTF_EXT_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
+		break;
+	case IPA_IOC_GET_HDR32:
+		cmd = IPA_IOC_GET_HDR;
+		break;
+	case IPA_IOC_ALLOC_NAT_MEM32:
+		if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
+			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
+			retval = -EFAULT;
+			goto ret;
+		}
+		memcpy(nat_mem.dev_name, nat_mem32.dev_name,
+				IPA_RESOURCE_NAME_MAX);
+		nat_mem.size = (size_t)nat_mem32.size;
+		nat_mem.offset = (off_t)nat_mem32.offset;
+
+		/* null terminate the string */
+		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+		if (ipa3_allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			goto ret;
+		}
+		nat_mem32.offset = (compat_off_t)nat_mem.offset;
+		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
+			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
+			retval = -EFAULT;
+		}
+ret:
+		return retval;
+	case IPA_IOC_V4_INIT_NAT32:
+		cmd = IPA_IOC_V4_INIT_NAT;
+		break;
+	case IPA_IOC_NAT_DMA32:
+		cmd = IPA_IOC_NAT_DMA;
+		break;
+	case IPA_IOC_V4_DEL_NAT32:
+		cmd = IPA_IOC_V4_DEL_NAT;
+		break;
+	case IPA_IOC_GET_NAT_OFFSET32:
+		cmd = IPA_IOC_GET_NAT_OFFSET;
+		break;
+	case IPA_IOC_PULL_MSG32:
+		cmd = IPA_IOC_PULL_MSG;
+		break;
+	case IPA_IOC_RM_ADD_DEPENDENCY32:
+		cmd = IPA_IOC_RM_ADD_DEPENDENCY;
+		break;
+	case IPA_IOC_RM_DEL_DEPENDENCY32:
+		cmd = IPA_IOC_RM_DEL_DEPENDENCY;
+		break;
+	case IPA_IOC_GENERATE_FLT_EQ32:
+		cmd = IPA_IOC_GENERATE_FLT_EQ;
+		break;
+	case IPA_IOC_QUERY_RT_TBL_INDEX32:
+		cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
+		break;
+	case IPA_IOC_WRITE_QMAPID32:
+		cmd = IPA_IOC_WRITE_QMAPID;
+		break;
+	case IPA_IOC_MDFY_FLT_RULE32:
+		cmd = IPA_IOC_MDFY_FLT_RULE;
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
+		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
+		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
+		break;
+	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
+		cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
+		break;
+	case IPA_IOC_MDFY_RT_RULE32:
+		cmd = IPA_IOC_MDFY_RT_RULE;
+		break;
+	case IPA_IOC_COMMIT_HDR:
+	case IPA_IOC_RESET_HDR:
+	case IPA_IOC_COMMIT_RT:
+	case IPA_IOC_RESET_RT:
+	case IPA_IOC_COMMIT_FLT:
+	case IPA_IOC_RESET_FLT:
+	case IPA_IOC_DUMP:
+	case IPA_IOC_PUT_RT_TBL:
+	case IPA_IOC_PUT_HDR:
+	case IPA_IOC_SET_FLT:
+	case IPA_IOC_QUERY_EP_MAPPING:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static ssize_t ipa3_write(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos);
+
+static const struct file_operations ipa3_drv_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa3_open,
+	.read = ipa3_read,
+	.write = ipa3_write,
+	.unlocked_ioctl = ipa3_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_ipa3_ioctl,
+#endif
+};
+
+static int ipa3_get_clks(struct device *dev)
+{
+	ipa3_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(ipa3_clk)) {
+		if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
+			IPAERR("fail to get ipa clk\n");
+		return PTR_ERR(ipa3_clk);
+	}
+	return 0;
+}
+
+/**
+ * _ipa_enable_clks_v3_0() - Enable IPA clocks.
+ */
+void _ipa_enable_clks_v3_0(void)
+{
+	IPADBG_LOW("enabling gcc_ipa_clk\n");
+	if (ipa3_clk) {
+		clk_prepare(ipa3_clk);
+		clk_enable(ipa3_clk);
+		IPADBG_LOW("curr_ipa_clk_rate=%d\n", ipa3_ctx->curr_ipa_clk_rate);
+		clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+		ipa3_uc_notify_clk_state(true);
+	} else {
+		WARN_ON(1);
+	}
+
+	ipa3_suspend_apps_pipes(false);
+}
+
+static unsigned int ipa3_get_bus_vote(void)
+{
+	unsigned int idx = 1;
+
+	if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs) {
+		idx = 1;
+	} else if (ipa3_ctx->curr_ipa_clk_rate ==
+			ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
+		if (ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2)
+			idx = 1;
+		else
+			idx = 2;
+	} else if (ipa3_ctx->curr_ipa_clk_rate ==
+			ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
+		idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
+	} else {
+		WARN_ON(1);
+	}
+
+	IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
+
+	return idx;
+}
+
+/**
+* ipa3_enable_clks() - Turn on IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa3_enable_clks(void)
+{
+	IPADBG("enabling IPA clocks and bus voting\n");
+
+	ipa3_ctx->ctrl->ipa3_enable_clks();
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
+		    ipa3_get_bus_vote()))
+			WARN_ON(1);
+}
+
+
+/**
+ * _ipa_disable_clks_v3_0() - Disable IPA clocks.
+ */
+void _ipa_disable_clks_v3_0(void)
+{
+	IPADBG_LOW("disabling gcc_ipa_clk\n");
+	ipa3_suspend_apps_pipes(true);
+	ipa3_uc_notify_clk_state(false);
+	if (ipa3_clk)
+		clk_disable_unprepare(ipa3_clk);
+	else
+		WARN_ON(1);
+}
+
+/**
+* ipa3_disable_clks() - Turn off IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa3_disable_clks(void)
+{
+	IPADBG("disabling IPA clocks and bus voting\n");
+
+	ipa3_ctx->ctrl->ipa3_disable_clks();
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
+		    0))
+			WARN_ON(1);
+}
+
+/**
+ * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
+ *
+ * This function is called prior to clock gating when active client counter
+ * is 1. The TAG process ensures that there are no packets inside IPA HW that
+ * were not submitted to the peer's BAM. During the TAG process all
+ * aggregation frames are (force) closed.
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_start_tag_process(struct work_struct *work)
+{
+	int res;
+
+	IPADBG("starting TAG process\n");
+	/* close aggregation frames on all pipes */
+	res = ipa3_tag_aggr_force_close(-1);
+	if (res)
+		IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
+
+	IPADBG("TAG process done\n");
+}
+
+/**
+* ipa3_active_clients_log_mod() - Log a modification in the active clients
+* reference count
+*
+* This method logs any modification of the active clients reference count:
+* it records the modification in the circular history buffer and in the
+* hash table - looking up an entry, creating one if needed and deleting
+* one when it is no longer needed.
+*
+* @id: ipa_active_client_logging_info struct holding the log information
+* @inc: a boolean indicating whether the modification is an increase or
+* a decrease
+* @int_ctx: a boolean variable to indicate whether this call is being made from
+* an interrupt context and therefore should allocate GFP_ATOMIC memory
+*
+* Method process:
+* - Hash the unique identifier string
+* - Find the hash in the table
+*    1)If found, increase or decrease the reference count
+*    2)If not found, allocate a new hash table entry struct and initialize it
+* - Remove and deallocate unneeded data structure
+* - Log the call in the circular history buffer (unless it is a simple call)
+*/
+void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
+		bool inc, bool int_ctx)
+{
+	char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
+	unsigned long long t;
+	unsigned long nanosec_rem;
+	struct ipa3_active_client_htable_entry *hentry;
+	struct ipa3_active_client_htable_entry *hfound;
+	u32 hkey;
+	char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+
+	hfound = NULL;
+	memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+	strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+	hkey = arch_fast_hash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
+			0);
+	hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
+			hentry, list, hkey) {
+		if (!strcmp(hentry->id_string, id->id_string)) {
+			hentry->count = hentry->count + (inc ? 1 : -1);
+			hfound = hentry;
+		}
+	}
+	if (hfound == NULL) {
+		hentry = NULL;
+		hentry = kzalloc(sizeof(
+				struct ipa3_active_client_htable_entry),
+				int_ctx ? GFP_ATOMIC : GFP_KERNEL);
+		if (hentry == NULL) {
+			IPAERR("failed allocating active clients hash entry\n");
+			return;
+		}
+		hentry->type = id->type;
+		strlcpy(hentry->id_string, id->id_string,
+				IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+		INIT_HLIST_NODE(&hentry->list);
+		hentry->count = inc ? 1 : -1;
+		hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
+				&hentry->list, hkey);
+	} else if (hfound->count == 0) {
+		hash_del(&hfound->list);
+		kfree(hfound);
+	}
+
+	if (id->type != SIMPLE) {
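+		/* record a timestamped entry in the circular history buffer */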
+		t = local_clock();
+		nanosec_rem = do_div(t, 1000000000) / 1000;
+		snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
+				inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
+						"[%5lu.%06lu] v %s, %s: %d",
+				(unsigned long)t, nanosec_rem,
+				id->id_string, id->file, id->line);
+		ipa3_active_clients_log_insert(temp_str);
+	}
+}
+
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+		bool int_ctx)
+{
+	ipa3_active_clients_log_mod(id, false, int_ctx);
+}
+
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+		bool int_ctx)
+{
+	ipa3_active_clients_log_mod(id, true, int_ctx);
+}
+
+/**
+* ipa3_inc_client_enable_clks() - Increase active clients counter, and
+* enable ipa clocks if necessary
+*
+* Return codes:
+* None
+*/
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+	ipa3_active_clients_lock();
+	ipa3_active_clients_log_inc(id, false);
+	ipa3_ctx->ipa3_active_clients.cnt++;
+	if (ipa3_ctx->ipa3_active_clients.cnt == 1)
+		ipa3_enable_clks();
+	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+	ipa3_active_clients_unlock();
+}
+
+/**
+* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
+* clients if no asynchronous actions are required. Asynchronous actions are
+* locking a mutex and waking up IPA HW.
+*
+* Return codes: 0 for success
+*		-EPERM if an asynchronous action should have been done
+*/
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+		*id)
+{
+	int res = 0;
+	unsigned long flags;
+
+	if (ipa3_active_clients_trylock(&flags) == 0)
+		return -EPERM;
+
+	if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
+		res = -EPERM;
+		goto bail;
+	}
+	ipa3_active_clients_log_inc(id, true);
+	ipa3_ctx->ipa3_active_clients.cnt++;
+	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+bail:
+	ipa3_active_clients_trylock_unlock(&flags);
+
+	return res;
+}
+
+/**
+ * ipa3_dec_client_disable_clks() - Decrease active clients counter
+ *
+ * If there are no more active clients, this function also starts the TAG
+ * process. When the TAG process ends, the IPA clocks will be gated.
+ * The start_tag_process_again flag is set during this function to signal the
+ * TAG process to start again, as another client may have sent data to IPA.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+	struct ipa_active_client_logging_info log_info;
+
+	ipa3_active_clients_lock();
+	ipa3_active_clients_log_dec(id, false);
+	ipa3_ctx->ipa3_active_clients.cnt--;
+	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+	if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
+		if (ipa3_ctx->tag_process_before_gating) {
+			ipa3_ctx->tag_process_before_gating = false;
+			/*
+			 * When TAG process ends, active clients will be
+			 * decreased
+			 */
+			IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
+					"TAG_PROCESS");
+			ipa3_active_clients_log_inc(&log_info, false);
+			ipa3_ctx->ipa3_active_clients.cnt = 1;
+			queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
+		} else {
+			ipa3_disable_clks();
+		}
+	}
+	ipa3_active_clients_unlock();
+}
+
+/**
+* ipa3_inc_acquire_wakelock() - Increase active clients counter, and
+* acquire wakelock if necessary
+*
+* Return codes:
+* None
+*/
+void ipa3_inc_acquire_wakelock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+	ipa3_ctx->wakelock_ref_cnt.cnt++;
+	if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
+		__pm_stay_awake(&ipa3_ctx->w_lock);
+	IPADBG_LOW("active wakelock ref cnt = %d\n",
+		ipa3_ctx->wakelock_ref_cnt.cnt);
+	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+/**
+ * ipa3_dec_release_wakelock() - Decrease active clients counter
+ *
+ * If the ref count reaches 0, release the wakelock.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_release_wakelock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+	ipa3_ctx->wakelock_ref_cnt.cnt--;
+	IPADBG_LOW("active wakelock ref cnt = %d\n",
+		ipa3_ctx->wakelock_ref_cnt.cnt);
+	if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
+		__pm_relax(&ipa3_ctx->w_lock);
+	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+				  u32 bandwidth_mbps)
+{
+	enum ipa_voltage_level needed_voltage;
+	u32 clk_rate;
+
+	IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
+					floor_voltage, bandwidth_mbps);
+
+	if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
+		floor_voltage >= IPA_VOLTAGE_MAX) {
+		IPAERR("bad voltage\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->enable_clock_scaling) {
+		IPADBG_LOW("Clock scaling is enabled\n");
+		if (bandwidth_mbps >=
+			ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
+			needed_voltage = IPA_VOLTAGE_TURBO;
+		else if (bandwidth_mbps >=
+			ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
+			needed_voltage = IPA_VOLTAGE_NOMINAL;
+		else
+			needed_voltage = IPA_VOLTAGE_SVS;
+	} else {
+		IPADBG_LOW("Clock scaling is disabled\n");
+		needed_voltage = IPA_VOLTAGE_NOMINAL;
+	}
+
+	needed_voltage = max(needed_voltage, floor_voltage);
+	switch (needed_voltage) {
+	case IPA_VOLTAGE_SVS:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
+		break;
+	case IPA_VOLTAGE_NOMINAL:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
+		break;
+	case IPA_VOLTAGE_TURBO:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
+		break;
+	default:
+		IPAERR("bad voltage\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
+		IPADBG_LOW("Same voltage\n");
+		return 0;
+	}
+
+	ipa3_active_clients_lock();
+	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
+	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
+	if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
+		clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+		if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+			if (msm_bus_scale_client_update_request(
+			    ipa3_ctx->ipa_bus_hdl, ipa3_get_bus_vote()))
+				WARN_ON(1);
+	} else {
+		IPADBG_LOW("clocks are gated, not setting rate\n");
+	}
+	ipa3_active_clients_unlock();
+	IPADBG_LOW("Done\n");
+	return 0;
+}
+
+static void ipa3_sps_process_irq_schedule_rel(void)
+{
+	queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+		&ipa3_sps_release_resource_work,
+		msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
+}
+
+/**
+* ipa3_suspend_handler() - Handles the suspend interrupt:
+* wakes up the suspended peripheral by requesting its consumer
+* @interrupt:		Interrupt type
+* @private_data:	The client's private data
+* @interrupt_data:	Interrupt specific information data
+*/
+void ipa3_suspend_handler(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data)
+{
+	enum ipa_rm_resource_name resource;
+	u32 suspend_data =
+		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
+	u32 bmsk = 1;
+	u32 i = 0;
+	int res;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	IPADBG("interrupt=%d, interrupt_data=%u\n",
+		interrupt, suspend_data);
+	memset(&holb_cfg, 0, sizeof(holb_cfg));
+	holb_cfg.tmr_val = 0;
+
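+	/*
+	 * Walk the per-pipe suspend bitmask; each set bit marks a pipe that
+	 * raised the TX_SUSPEND interrupt
+	 */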
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
+			if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
+				/*
+				 * pipe will be unsuspended as part of
+				 * enabling IPA clocks
+				 */
+				if (!atomic_read(
+					&ipa3_ctx->transport_pm.dec_clients)
+					) {
+					IPA_ACTIVE_CLIENTS_INC_EP(
+						ipa3_ctx->ep[i].client);
+					IPADBG_LOW("Pipes un-suspended.\n");
+					IPADBG_LOW("Enter poll mode.\n");
+					atomic_set(
+					&ipa3_ctx->transport_pm.dec_clients,
+					1);
+					ipa3_sps_process_irq_schedule_rel();
+				}
+			} else {
+				resource = ipa3_get_rm_resource_from_ep(i);
+				res =
+				ipa_rm_request_resource_with_timer(resource);
+				if (res == -EPERM &&
+					IPA_CLIENT_IS_CONS(
+					   ipa3_ctx->ep[i].client)) {
+					holb_cfg.en = 1;
+					res = ipa3_cfg_ep_holb_by_client(
+					   ipa3_ctx->ep[i].client, &holb_cfg);
+					if (res) {
+						IPAERR("holb en fail, stall\n");
+						BUG();
+					}
+				}
+			}
+		}
+		bmsk = bmsk << 1;
+	}
+}
+
+/**
+* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
+* as it was registered in the IPA init sequence.
+* Return codes:
+* 0: success
+* -EPERM: failed to remove current handler or failed to add original handler
+*/
+int ipa3_restore_suspend_handler(void)
+{
+	int result = 0;
+
+	result  = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+	if (result) {
+		IPAERR("remove handler for suspend interrupt failed\n");
+		return -EPERM;
+	}
+
+	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+			ipa3_suspend_handler, false, NULL);
+	if (result) {
+		IPAERR("register handler for suspend interrupt failed\n");
+		return -EPERM;
+	}
+
+	IPADBG("suspend handler successfully restored\n");
+
+	return result;
+}
+
+static int ipa3_apps_cons_release_resource(void)
+{
+	return 0;
+}
+
+static int ipa3_apps_cons_request_resource(void)
+{
+	return 0;
+}
+
+static void ipa3_sps_release_resource(struct work_struct *work)
+{
+	/* check whether still need to decrease client usage */
+	if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
+		if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
+			IPADBG("EOT pending Re-scheduling\n");
+			ipa3_sps_process_irq_schedule_rel();
+		} else {
+			atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
+		}
+	}
+	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+}
+
+int ipa3_create_apps_resource(void)
+{
+	struct ipa_rm_create_params apps_cons_create_params;
+	struct ipa_rm_perf_profile profile;
+	int result = 0;
+
+	memset(&apps_cons_create_params, 0,
+				sizeof(apps_cons_create_params));
+	apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
+	apps_cons_create_params.request_resource =
+		ipa3_apps_cons_request_resource;
+	apps_cons_create_params.release_resource =
+		ipa3_apps_cons_release_resource;
+	result = ipa_rm_create_resource(&apps_cons_create_params);
+	if (result) {
+		IPAERR("ipa_rm_create_resource failed\n");
+		return result;
+	}
+
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
+
+	return result;
+}
+
+/**
+ * ipa3_init_interrupts() - Register to IPA IRQs
+ *
+ * Return codes: 0 in success, negative in failure
+ *
+ */
+int ipa3_init_interrupts(void)
+{
+	int result;
+
+	/*register IPA IRQ handler*/
+	result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
+			master_dev);
+	if (result) {
+		IPAERR("ipa interrupts initialization failed\n");
+		return -ENODEV;
+	}
+
+	/*add handler for suspend interrupt*/
+	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+			ipa3_suspend_handler, false, NULL);
+	if (result) {
+		IPAERR("register handler for suspend interrupt failed\n");
+		result = -ENODEV;
+		goto fail_add_interrupt_handler;
+	}
+
+	return 0;
+
+fail_add_interrupt_handler:
+	free_irq(ipa3_res.ipa_irq, master_dev);
+	return result;
+}
+
+/**
+ * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
+ *  The idr structure per filtering table is intended for rule id generation
+ *  per filtering rule.
+ */
+static void ipa3_destroy_flt_tbl_idrs(void)
+{
+	int i;
+	struct ipa3_flt_tbl *flt_tbl;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+		idr_destroy(&flt_tbl->rule_ids);
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+		idr_destroy(&flt_tbl->rule_ids);
+	}
+}
+
+static void ipa3_freeze_clock_vote_and_notify_modem(void)
+{
+	int res;
+	u32 ipa_clk_state;
+	struct ipa_active_client_logging_info log_info;
+
+	if (ipa3_ctx->smp2p_info.res_sent)
+		return;
+
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
+	res = ipa3_inc_client_enable_clks_no_block(&log_info);
+	if (res)
+		ipa_clk_state = 0;
+	else
+		ipa_clk_state = 1;
+
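+	/*
+	 * Publish the clock state to the modem over SMP2P: one GPIO carries
+	 * the clock vote, a second signals that the response is complete
+	 */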
+	if (ipa3_ctx->smp2p_info.out_base_id) {
+		gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
+			IPA_GPIO_OUT_CLK_VOTE_IDX, ipa_clk_state);
+		gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
+			IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);
+		ipa3_ctx->smp2p_info.res_sent = true;
+	} else {
+		IPAERR("smp2p out gpio not assigned\n");
+	}
+
+	IPADBG("IPA clocks are %s\n", ipa_clk_state ? "ON" : "OFF");
+}
+
+static int ipa3_panic_notifier(struct notifier_block *this,
+	unsigned long event, void *ptr)
+{
+	int res;
+
+	ipa3_freeze_clock_vote_and_notify_modem();
+
+	IPADBG("Calling uC panic handler\n");
+	res = ipa3_uc_panic_notifier(this, event, ptr);
+	if (res)
+		IPAERR("uC panic handler failed %d\n", res);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_panic_blk = {
+	.notifier_call = ipa3_panic_notifier,
+	/* IPA panic handler needs to run before modem shuts down */
+	.priority = INT_MAX,
+};
+
+static void ipa3_register_panic_hdlr(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+		&ipa3_panic_blk);
+}
+
+static void ipa3_trigger_ipa_ready_cbs(void)
+{
+	struct ipa3_ready_cb_info *info;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	/* Call all the CBs */
+	list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
+		if (info->ready_cb)
+			info->ready_cb(info->user_data);
+
+	mutex_unlock(&ipa3_ctx->lock);
+}
+
+static int ipa3_gsi_pre_fw_load_init(void)
+{
+	int result;
+
+	result = gsi_configure_regs(ipa3_res.transport_mem_base,
+		ipa3_res.transport_mem_size,
+		ipa3_res.ipa_mem_base);
+	if (result) {
+		IPAERR("Failed to configure GSI registers\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_post_init() - Initialize the IPA Driver (Part II).
+ * This part contains all initialization which requires interaction with
+ * IPA HW (via SPS BAM or GSI).
+ *
+ * @resource_p:	contains platform-specific values from the DTS file
+ * @ipa_dev:	the device structure representing the IPA driver
+ *
+ * Function initialization process:
+ * - Register BAM/SPS or GSI
+ * - Setup APPS pipes
+ * - Initialize tethering bridge
+ * - Initialize IPA debugfs
+ * - Initialize IPA uC interface
+ * - Initialize WDI interface
+ * - Initialize USB interface
+ * - Register for panic handler
+ * - Trigger IPA ready callbacks (to all subscribers)
+ * - Trigger IPA completion object (to all who wait on it)
+ */
+static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
+			  struct device *ipa_dev)
+{
+	int result;
+	struct sps_bam_props bam_props = { 0 };
+	struct gsi_per_props gsi_props;
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		memset(&gsi_props, 0, sizeof(gsi_props));
+		gsi_props.ee = resource_p->ee;
+		gsi_props.intr = GSI_INTR_IRQ;
+		gsi_props.irq = resource_p->transport_irq;
+		gsi_props.phys_addr = resource_p->transport_mem_base;
+		gsi_props.size = resource_p->transport_mem_size;
+		gsi_props.notify_cb = ipa_gsi_notify_cb;
+		gsi_props.req_clk_cb = NULL;
+		gsi_props.rel_clk_cb = NULL;
+
+		result = gsi_register_device(&gsi_props,
+			&ipa3_ctx->gsi_dev_hdl);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR(":gsi register error - %d\n", result);
+			result = -ENODEV;
+			goto fail_register_device;
+		}
+		IPADBG("IPA gsi is registered\n");
+	} else {
+		/* register IPA with SPS driver */
+		bam_props.phys_addr = resource_p->transport_mem_base;
+		bam_props.virt_size = resource_p->transport_mem_size;
+		bam_props.irq = resource_p->transport_irq;
+		bam_props.num_pipes = ipa3_ctx->ipa_num_pipes;
+		bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+		bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+		bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
+		if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+			bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP;
+		if (ipa3_ctx->ipa_bam_remote_mode == true)
+			bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
+		if (!ipa3_ctx->smmu_s1_bypass)
+			bam_props.options |= SPS_BAM_SMMU_EN;
+		bam_props.ee = resource_p->ee;
+		bam_props.ipc_loglevel = 3;
+
+		result = sps_register_bam_device(&bam_props,
+			&ipa3_ctx->bam_handle);
+		if (result) {
+			IPAERR(":bam register error - %d\n", result);
+			result = -EPROBE_DEFER;
+			goto fail_register_device;
+		}
+		IPADBG("IPA BAM is registered\n");
+	}
+
+	/* setup the AP-IPA pipes */
+	if (ipa3_setup_apps_pipes()) {
+		IPAERR(":failed to setup IPA-Apps pipes\n");
+		result = -ENODEV;
+		goto fail_setup_apps_pipes;
+	}
+	IPADBG("IPA System2Bam pipes were connected\n");
+
+	if (ipa3_ctx->use_ipa_teth_bridge) {
+		/* Initialize the tethering bridge driver */
+		result = ipa3_teth_bridge_driver_init();
+		if (result) {
+			IPAERR(":teth_bridge init failed (%d)\n", -result);
+			result = -ENODEV;
+			goto fail_teth_bridge_driver_init;
+		}
+		IPADBG("teth_bridge initialized\n");
+	}
+
+	ipa3_debugfs_init();
+
+	result = ipa3_uc_interface_init();
+	if (result)
+		IPAERR(":ipa Uc interface init failed (%d)\n", -result);
+	else
+		IPADBG(":ipa Uc interface init ok\n");
+
+	result = ipa3_wdi_init();
+	if (result)
+		IPAERR(":wdi init failed (%d)\n", -result);
+	else
+		IPADBG(":wdi init ok\n");
+
+	result = ipa3_ntn_init();
+	if (result)
+		IPAERR(":ntn init failed (%d)\n", -result);
+	else
+		IPADBG(":ntn init ok\n");
+
+	ipa3_register_panic_hdlr();
+
+	ipa3_ctx->q6_proxy_clk_vote_valid = true;
+
+	mutex_lock(&ipa3_ctx->lock);
+	ipa3_ctx->ipa_initialization_complete = true;
+	mutex_unlock(&ipa3_ctx->lock);
+
+	ipa3_trigger_ipa_ready_cbs();
+	complete_all(&ipa3_ctx->init_completion_obj);
+	pr_info("IPA driver initialization was successful.\n");
+
+	return 0;
+
+fail_teth_bridge_driver_init:
+	ipa3_teardown_apps_pipes();
+fail_setup_apps_pipes:
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+		gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
+	else
+		sps_deregister_bam_device(ipa3_ctx->bam_handle);
+fail_register_device:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+	ipa_rm_exit();
+	cdev_del(&ipa3_ctx->cdev);
+	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
+	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
+	if (ipa3_ctx->pipe_mem_pool)
+		gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
+	ipa3_destroy_flt_tbl_idrs();
+	idr_destroy(&ipa3_ctx->ipa_idr);
+	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
+	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
+	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
+	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
+	kmem_cache_destroy(ipa3_ctx->hdr_cache);
+	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
+	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
+	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
+	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
+	iounmap(ipa3_ctx->mmio);
+	ipa3_disable_clks();
+	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
+	if (ipa3_bus_scale_table) {
+		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
+		ipa3_bus_scale_table = NULL;
+	}
+	kfree(ipa3_ctx->ctrl);
+	kfree(ipa3_ctx);
+	ipa3_ctx = NULL;
+	return result;
+}
+
+static int ipa3_trigger_fw_loading_mdms(void)
+{
+	int result;
+	const struct firmware *fw;
+
+	IPADBG("FW loading process initiated\n");
+
+	result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->dev);
+	if (result < 0) {
+		IPAERR("request_firmware failed, error %d\n", result);
+		return result;
+	}
+	if (fw == NULL) {
+		IPAERR("Firmware is NULL!\n");
+		return -EINVAL;
+	}
+
+	IPADBG("FWs are available for loading\n");
+
+	result = ipa3_load_fws(fw);
+	if (result) {
+		IPAERR("IPA FWs loading has failed\n");
+		release_firmware(fw);
+		return result;
+	}
+
+	result = gsi_enable_fw(ipa3_res.transport_mem_base,
+				ipa3_res.transport_mem_size);
+	if (result) {
+		IPAERR("Failed to enable GSI FW\n");
+		release_firmware(fw);
+		return result;
+	}
+
+	release_firmware(fw);
+
+	IPADBG("FW loading process is complete\n");
+	return 0;
+}
+
+static int ipa3_trigger_fw_loading_msms(void)
+{
+	void *subsystem_get_retval = NULL;
+
+	IPADBG("FW loading process initiated\n");
+
+	subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
+	if (IS_ERR_OR_NULL(subsystem_get_retval)) {
+		IPAERR("Unable to trigger PIL process for FW loading\n");
+		return -EINVAL;
+	}
+
+	IPADBG("FW loading process is complete\n");
+	return 0;
+}
+
+static ssize_t ipa3_write(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	int result = -EINVAL;
+
+	char dbg_buff[16] = { 0 };
+
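+	/* reject writes that do not fit in the local buffer (incl. NUL) */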
+	if (count >= sizeof(dbg_buff))
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+
+	if (missing) {
+		IPAERR("Unable to copy data from user\n");
+		return -EFAULT;
+	}
+
+	/* Prevent subsequent calls from trying to load the FW again. */
+	if (ipa3_is_ready())
+		return count;
+
+	/*
+	 * We will trigger the process only if we're in GSI mode, otherwise,
+	 * we just ignore the write.
+	 */
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+		if (ipa3_is_msm_device())
+			result = ipa3_trigger_fw_loading_msms();
+		else
+			result = ipa3_trigger_fw_loading_mdms();
+		/* All IPAv3.x chipsets support FW loading */
+
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+		if (result) {
+			IPAERR("FW loading process has failed\n");
+			BUG();
+		} else
+			ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+	}
+	return count;
+}
+
+/**
+* ipa3_pre_init() - Initialize the IPA Driver.
+* This part contains all initialization which doesn't require IPA HW, such
+* as structure allocations and initializations, register writes, etc.
+*
+* @resource_p:	contains platform-specific values from the DTS file
+* @ipa_dev:	the device structure representing the IPA driver
+*
+* Function initialization process:
+* - Allocate memory for the driver context data struct
+* - Initialize the ipa3_ctx with:
+*    1) parsed values from the dts file
+*    2) parameters passed to the module initialization
+*    3) HW values read from the hardware (such as core memory size)
+* - Map IPA core registers to CPU memory
+* - Restart the IPA core (HW reset)
+* - Set configuration for IPA BAM via BAM_CNFG_BITS
+* - Initialize the look-aside caches (kmem_cache/slab) for filter,
+*   routing and IPA-tree
+* - Create a memory pool with 4 objects for DMA operations (each object
+*   is 512 bytes long); these objects are used for TX (A5->IPA)
+* - Initialize list heads (routing, filter, hdr, system pipes)
+* - Initialize mutexes (for ipa_ctx and NAT memory mutexes)
+* - Initialize spinlocks (for lists related to A5<->IPA pipes)
+* - Initialize 2 single-threaded work-queues named "ipa rx wq" and "ipa tx wq"
+* - Initialize Red-Black-Tree(s) for handles of header, routing rule,
+*   routing table and filtering rule
+* - Initialize the filter block by committing IPV4 and IPV6 default rules
+* - Create an empty routing table in system memory (no committing)
+* - Initialize the pipes memory pool with ipa3_pipe_mem_init for supported
+*   platforms
+* - Create a char-device for IPA
+* - Initialize IPA RM (resource manager)
+* - Configure GSI registers (in GSI case)
+*/
+static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
+		struct device *ipa_dev)
+{
+	int result = 0;
+	int i;
+	struct ipa3_flt_tbl *flt_tbl;
+	struct ipa3_rt_tbl_set *rset;
+	struct ipa_active_client_logging_info log_info;
+
+	IPADBG("IPA Driver initialization started\n");
+
+	ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
+	if (!ipa3_ctx) {
+		IPAERR(":kzalloc err.\n");
+		result = -ENOMEM;
+		goto fail_mem_ctx;
+	}
+
+	ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
+	if (ipa3_ctx->logbuf == NULL) {
+		IPAERR("failed to get logbuf\n");
+		result = -ENOMEM;
+		goto fail_logbuf;
+	}
+
+	ipa3_ctx->pdev = ipa_dev;
+	ipa3_ctx->uc_pdev = ipa_dev;
+	ipa3_ctx->smmu_present = smmu_info.present;
+	if (!ipa3_ctx->smmu_present)
+		ipa3_ctx->smmu_s1_bypass = true;
+	else
+		ipa3_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
+	ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+	ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
+	ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
+	ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
+	ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
+	ipa3_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
+	ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
+	ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
+	ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
+	ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
+	ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
+	ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
+	ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
+	ipa3_ctx->transport_prototype = resource_p->transport_prototype;
+	ipa3_ctx->ee = resource_p->ee;
+	ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
+	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
+	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
+
+	/* default aggregation parameters */
+	ipa3_ctx->aggregation_type = IPA_MBIM_16;
+	ipa3_ctx->aggregation_byte_limit = 1;
+	ipa3_ctx->aggregation_time_limit = 0;
+
+	ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
+	if (!ipa3_ctx->ctrl) {
+		IPAERR("memory allocation error for ctrl\n");
+		result = -ENOMEM;
+		goto fail_mem_ctrl;
+	}
+	result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
+			ipa3_ctx->ipa_hw_type);
+	if (result) {
+		IPAERR("fail to static bind IPA ctrl.\n");
+		result = -EFAULT;
+		goto fail_bind;
+	}
+
+	result = ipa3_init_mem_partition(master_dev->of_node);
+	if (result) {
+		IPAERR(":ipa3_init_mem_partition failed!\n");
+		result = -ENODEV;
+		goto fail_init_mem_partition;
+	}
+
+	if (ipa3_bus_scale_table) {
+		IPADBG("Use bus scaling info from device tree\n");
+		ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table;
+	}
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL) {
+		/* get BUS handle */
+		ipa3_ctx->ipa_bus_hdl =
+			msm_bus_scale_register_client(
+				ipa3_ctx->ctrl->msm_bus_data_ptr);
+		if (!ipa3_ctx->ipa_bus_hdl) {
+			IPAERR("fail to register with bus mgr!\n");
+			result = -ENODEV;
+			goto fail_bus_reg;
+		}
+	} else {
+		IPADBG("Skipping bus scaling registration on Virtual plat\n");
+	}
+
+	/* get IPA clocks */
+	result = ipa3_get_clks(master_dev);
+	if (result)
+		goto fail_clk;
+
+	/* init active_clients_log after getting ipa-clk */
+	if (ipa3_active_clients_log_init())
+		goto fail_init_active_client;
+
+	/* Enable ipa3_ctx->enable_clock_scaling */
+	ipa3_ctx->enable_clock_scaling = 1;
+	ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
+
+	/* enable IPA clocks explicitly to allow the initialization */
+	ipa3_enable_clks();
+
+	/* setup IPA register access */
+	IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst);
+	ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
+			ipa3_ctx->ctrl->ipa_reg_base_ofst,
+			resource_p->ipa_mem_size);
+	if (!ipa3_ctx->mmio) {
+		IPAERR(":ipa-base ioremap err.\n");
+		result = -EFAULT;
+		goto fail_remap;
+	}
+
+	if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
+		ipa3_ctx->pdev)) {
+		IPAERR("fail to init ipahal\n");
+		result = -EFAULT;
+		goto fail_ipahal;
+	}
+
+	result = ipa3_init_hw();
+	if (result) {
+		IPAERR(":error initializing HW.\n");
+		result = -ENODEV;
+		goto fail_init_hw;
+	}
+	IPADBG("IPA HW initialization sequence completed\n");
+
+	ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
+	if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
+		IPAERR("IPA has more pipes than supported! has %d, max %d\n",
+			ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
+		result = -ENODEV;
+		goto fail_init_hw;
+	}
+
+	ipa_init_ep_flt_bitmap();
+	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
+		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
+
+	ipa3_ctx->ctrl->ipa_sram_read_settings();
+	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
+		ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
+
+	IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
+		ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
+		ipa3_ctx->ip4_rt_tbl_nhash_lcl);
+
+	IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
+		ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
+
+	IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
+		ipa3_ctx->ip4_flt_tbl_hash_lcl,
+		ipa3_ctx->ip4_flt_tbl_nhash_lcl);
+
+	IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
+		ipa3_ctx->ip6_flt_tbl_hash_lcl,
+		ipa3_ctx->ip6_flt_tbl_nhash_lcl);
+
+	if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
+		IPAERR("SW expect more core memory, needed %d, avail %d\n",
+			ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+
+	mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
+	spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
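+	/*
+	 * Hold an initial active-clients reference (logged as PROXY_CLK_VOTE)
+	 * so the IPA clocks enabled above remain voted for the remainder of
+	 * the initialization sequence.
+	 */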
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
+	ipa3_active_clients_log_inc(&log_info, false);
+	ipa3_ctx->ipa3_active_clients.cnt = 1;
+
+	/* Assign resource limitation to each group */
+	ipa3_set_resorce_groups_min_max_limits();
+
+	/* Create workqueues for power management */
+	ipa3_ctx->power_mgmt_wq =
+		create_singlethread_workqueue("ipa_power_mgmt");
+	if (!ipa3_ctx->power_mgmt_wq) {
+		IPAERR("failed to create power mgmt wq\n");
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+
+	ipa3_ctx->transport_power_mgmt_wq =
+		create_singlethread_workqueue("transport_power_mgmt");
+	if (!ipa3_ctx->transport_power_mgmt_wq) {
+		IPAERR("failed to create transport power mgmt wq\n");
+		result = -ENOMEM;
+		goto fail_create_transport_wq;
+	}
+
+	spin_lock_init(&ipa3_ctx->transport_pm.lock);
+	ipa3_ctx->transport_pm.res_granted = false;
+	ipa3_ctx->transport_pm.res_rel_in_prog = false;
+
+	/* init the lookaside cache */
+	ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
+			sizeof(struct ipa3_flt_entry), 0, 0, NULL);
+	if (!ipa3_ctx->flt_rule_cache) {
+		IPAERR(":ipa flt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_flt_rule_cache;
+	}
+	ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
+			sizeof(struct ipa3_rt_entry), 0, 0, NULL);
+	if (!ipa3_ctx->rt_rule_cache) {
+		IPAERR(":ipa rt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_rule_cache;
+	}
+	ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
+			sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_cache) {
+		IPAERR(":ipa hdr cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_cache;
+	}
+	ipa3_ctx->hdr_offset_cache =
+	   kmem_cache_create("IPA_HDR_OFFSET",
+			   sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_offset_cache) {
+		IPAERR(":ipa hdr off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_offset_cache;
+	}
+	ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
+		sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_proc_ctx_cache) {
+		IPAERR(":ipa hdr proc ctx cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_proc_ctx_cache;
+	}
+	ipa3_ctx->hdr_proc_ctx_offset_cache =
+		kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
+		sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
+		IPAERR(":ipa hdr proc ctx off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_proc_ctx_offset_cache;
+	}
+	ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
+			sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
+	if (!ipa3_ctx->rt_tbl_cache) {
+		IPAERR(":ipa rt tbl cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_tbl_cache;
+	}
+	ipa3_ctx->tx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA_TX_PKT_WRAPPER",
+			   sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa3_ctx->tx_pkt_wrapper_cache) {
+		IPAERR(":ipa tx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_tx_pkt_wrapper_cache;
+	}
+	ipa3_ctx->rx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA_RX_PKT_WRAPPER",
+			   sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa3_ctx->rx_pkt_wrapper_cache) {
+		IPAERR(":ipa rx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rx_pkt_wrapper_cache;
+	}
+
+	/* Setup DMA pool */
+	ipa3_ctx->dma_pool = dma_pool_create("ipa_tx", ipa3_ctx->pdev,
+		IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
+		0, 0);
+	if (!ipa3_ctx->dma_pool) {
+		IPAERR("cannot alloc DMA pool.\n");
+		result = -ENOMEM;
+		goto fail_dma_pool;
+	}
+
+	/* init the various list heads */
+	INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
+	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+		INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(&ipa3_ctx->
+				hdr_proc_ctx_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+		idr_init(&flt_tbl->rule_ids);
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+		idr_init(&flt_tbl->rule_ids);
+	}
+
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+	INIT_LIST_HEAD(&ipa3_ctx->intf_list);
+	INIT_LIST_HEAD(&ipa3_ctx->msg_list);
+	INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
+	init_waitqueue_head(&ipa3_ctx->msg_waitq);
+	mutex_init(&ipa3_ctx->msg_lock);
+
+	mutex_init(&ipa3_ctx->lock);
+	mutex_init(&ipa3_ctx->nat_mem.lock);
+
+	idr_init(&ipa3_ctx->ipa_idr);
+	spin_lock_init(&ipa3_ctx->idr_lock);
+
+	/* wlan related member */
+	memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
+	spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
+	spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+	INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+
+	/* setup the IPA pipe mem pool */
+	if (resource_p->ipa_pipe_mem_size)
+		ipa3_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+				resource_p->ipa_pipe_mem_size);
+
+	ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+
+	result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	ipa3_ctx->dev = device_create(ipa3_ctx->class, NULL, ipa3_ctx->dev_num,
+			ipa3_ctx, DRV_NAME);
+	if (IS_ERR(ipa3_ctx->dev)) {
+		IPAERR(":device_create err.\n");
+		result = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
+	ipa3_ctx->cdev.owner = THIS_MODULE;
+	ipa3_ctx->cdev.ops = &ipa3_drv_fops;  /* from LDD3 */
+
+	result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
+	if (result) {
+		IPAERR(":cdev_add err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_cdev_add;
+	}
+	IPADBG("ipa cdev added successful. major:%d minor:%d\n",
+			MAJOR(ipa3_ctx->dev_num),
+			MINOR(ipa3_ctx->dev_num));
+
+	if (ipa3_create_nat_device()) {
+		IPAERR("unable to create nat device\n");
+		result = -ENODEV;
+		goto fail_nat_dev_add;
+	}
+
+	/* Create a wakeup source. */
+	wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
+	spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
+
+	/* Initialize IPA RM (resource manager) */
+	result = ipa_rm_initialize();
+	if (result) {
+		IPAERR("RM initialization failed (%d)\n", -result);
+		result = -ENODEV;
+		goto fail_ipa_rm_init;
+	}
+	IPADBG("IPA resource manager initialized");
+
+	result = ipa3_create_apps_resource();
+	if (result) {
+		IPAERR("Failed to create APPS_CONS resource\n");
+		result = -ENODEV;
+		goto fail_create_apps_resource;
+	}
+
+	if (!ipa3_ctx->apply_rg10_wa) {
+		result = ipa3_init_interrupts();
+		if (result) {
+			IPAERR("ipa initialization of interrupts failed\n");
+			result = -ENODEV;
+			goto fail_ipa_init_interrupts;
+		}
+	} else {
+		IPADBG("Initialization of ipa interrupts skipped\n");
+	}
+
+	INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
+
+	init_completion(&ipa3_ctx->init_completion_obj);
+
+	/*
+	 * For GSI, we can't register the GSI driver yet, as it expects
+	 * the GSI FW to be up and running before the registration.
+	 */
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		/*
+		 * For IPA3.0, the GSI configuration is done by the GSI driver.
+		 * For IPA3.1 (and on), the GSI configuration is done by TZ.
+		 */
+		if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
+			result = ipa3_gsi_pre_fw_load_init();
+			if (result) {
+				IPAERR("gsi pre FW loading config failed\n");
+				result = -ENODEV;
+				goto fail_ipa_init_interrupts;
+			}
+		}
+	}
+	/* For BAM (the only other transport), carry on with initialization */
+	else
+		return ipa3_post_init(resource_p, ipa_dev);
+
+	return 0;
+
+fail_ipa_init_interrupts:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+fail_create_apps_resource:
+	ipa_rm_exit();
+fail_ipa_rm_init:
+fail_nat_dev_add:
+	cdev_del(&ipa3_ctx->cdev);
+fail_cdev_add:
+	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	if (ipa3_ctx->pipe_mem_pool)
+		gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
+	ipa3_destroy_flt_tbl_idrs();
+	idr_destroy(&ipa3_ctx->ipa_idr);
+fail_dma_pool:
+	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
+fail_hdr_proc_ctx_offset_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
+fail_hdr_proc_ctx_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_cache);
+fail_hdr_cache:
+	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
+fail_create_transport_wq:
+	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
+fail_init_hw:
+	ipahal_destroy();
+fail_ipahal:
+	iounmap(ipa3_ctx->mmio);
+fail_remap:
+	ipa3_disable_clks();
+	ipa3_active_clients_log_destroy();
+fail_init_active_client:
+fail_clk:
+	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
+fail_bus_reg:
+fail_init_mem_partition:
+fail_bind:
+	kfree(ipa3_ctx->ctrl);
+fail_mem_ctrl:
+	ipc_log_context_destroy(ipa3_ctx->logbuf);
+fail_logbuf:
+	kfree(ipa3_ctx);
+	ipa3_ctx = NULL;
+fail_mem_ctx:
+	return result;
+}
+
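+/*
+ * get_ipa_dts_configuration() - parse the IPA device-tree node into
+ * ipa_drv_res. Properties consumed below include "qcom,ipa-hw-ver"
+ * (mandatory, must be IPA HW v3.0 or later), "qcom,ipa-hw-mode", the WAN/LAN
+ * RX ring sizes, boolean capability/workaround flags such as "qcom,use-gsi",
+ * "qcom,ipa-wdi2" and "qcom,use-rg10-limitation-mitigation", and the
+ * "ipa-base", "bam-base"/"gsi-base", "ipa-irq" and "bam-irq"/"gsi-irq"
+ * resources.
+ */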
+static int get_ipa_dts_configuration(struct platform_device *pdev,
+		struct ipa3_plat_drv_res *ipa_drv_res)
+{
+	int result;
+	struct resource *resource;
+
+	/* initialize ipa3_res */
+	ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+	ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+	ipa_drv_res->ipa_hw_type = 0;
+	ipa_drv_res->ipa3_hw_mode = 0;
+	ipa_drv_res->ipa_bam_remote_mode = false;
+	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
+	ipa_drv_res->ipa_wdi2 = false;
+	ipa_drv_res->use_64_bit_dma_mask = false;
+	ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+	ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+	ipa_drv_res->apply_rg10_wa = false;
+	ipa_drv_res->gsi_ch20_wa = false;
+
+	smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
+			"qcom,smmu-disable-htw");
+
+	/* Get IPA HW Version */
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
+					&ipa_drv_res->ipa_hw_type);
+	if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
+		IPAERR(":get resource failed for ipa-hw-ver!\n");
+		return -ENODEV;
+	}
+	IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
+
+	if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
+		IPAERR(":IPA version below 3.0 not supported!\n");
+		return -ENODEV;
+	}
+
+	/* Get IPA HW mode */
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
+			&ipa_drv_res->ipa3_hw_mode);
+	if (result)
+		IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
+	else
+		IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
+				ipa_drv_res->ipa3_hw_mode);
+
+	/* Get IPA WAN / LAN RX pool size */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-ring-size",
+			&ipa_drv_res->wan_rx_ring_size);
+	if (result)
+		IPADBG("using default for wan-rx-ring-size = %u\n",
+				ipa_drv_res->wan_rx_ring_size);
+	else
+		IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
+				ipa_drv_res->wan_rx_ring_size);
+
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,lan-rx-ring-size",
+			&ipa_drv_res->lan_rx_ring_size);
+	if (result)
+		IPADBG("using default for lan-rx-ring-size = %u\n",
+			ipa_drv_res->lan_rx_ring_size);
+	else
+		IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
+			ipa_drv_res->lan_rx_ring_size);
+
+	ipa_drv_res->use_ipa_teth_bridge =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-ipa-tethering-bridge");
+	IPADBG(": using TBDr = %s",
+		ipa_drv_res->use_ipa_teth_bridge
+		? "True" : "False");
+
+	ipa_drv_res->ipa_bam_remote_mode =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-bam-remote-mode");
+	IPADBG(": ipa bam remote mode = %s\n",
+			ipa_drv_res->ipa_bam_remote_mode
+			? "True" : "False");
+
+	ipa_drv_res->modem_cfg_emb_pipe_flt =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,modem-cfg-emb-pipe-flt");
+	IPADBG(": modem configure embedded pipe filtering = %s\n",
+			ipa_drv_res->modem_cfg_emb_pipe_flt
+			? "True" : "False");
+
+	ipa_drv_res->ipa_wdi2 =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-wdi2");
+	IPADBG(": WDI-2.0 = %s\n",
+			ipa_drv_res->ipa_wdi2
+			? "True" : "False");
+
+	ipa_drv_res->use_64_bit_dma_mask =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-64-bit-dma-mask");
+	IPADBG(": use_64_bit_dma_mask = %s\n",
+			ipa_drv_res->use_64_bit_dma_mask
+			? "True" : "False");
+
+	ipa_drv_res->skip_uc_pipe_reset =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,skip-uc-pipe-reset");
+	IPADBG(": skip uC pipe reset = %s\n",
+		ipa_drv_res->skip_uc_pipe_reset
+		? "True" : "False");
+
+	ipa_drv_res->tethered_flow_control =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,tethered-flow-control");
+	IPADBG(": Use apps based flow control = %s\n",
+		ipa_drv_res->tethered_flow_control
+		? "True" : "False");
+
+	if (of_property_read_bool(pdev->dev.of_node,
+		"qcom,use-gsi"))
+		ipa_drv_res->transport_prototype = IPA_TRANSPORT_TYPE_GSI;
+	else
+		ipa_drv_res->transport_prototype = IPA_TRANSPORT_TYPE_SPS;
+
+	IPADBG(": transport type = %s\n",
+		ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS
+		? "SPS" : "GSI");
+
+	/* Get IPA wrapper address */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"ipa-base");
+	if (!resource) {
+		IPAERR(":get resource failed for ipa-base!\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->ipa_mem_base = resource->start;
+	ipa_drv_res->ipa_mem_size = resource_size(resource);
+	IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
+			ipa_drv_res->ipa_mem_base,
+			ipa_drv_res->ipa_mem_size);
+
+	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
+	if (ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
+		/* Get IPA BAM address */
+		resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+				"bam-base");
+		if (!resource) {
+			IPAERR(":get resource failed for bam-base!\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->transport_mem_base = resource->start;
+		ipa_drv_res->transport_mem_size = resource_size(resource);
+		IPADBG(": bam-base = 0x%x, size = 0x%x\n",
+				ipa_drv_res->transport_mem_base,
+				ipa_drv_res->transport_mem_size);
+
+		/* Get IPA BAM IRQ number */
+		resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+				"bam-irq");
+		if (!resource) {
+			IPAERR(":get resource failed for bam-irq!\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->transport_irq = resource->start;
+		IPADBG(": bam-irq = %d\n", ipa_drv_res->transport_irq);
+	} else {
+		/* Get IPA GSI address */
+		resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+				"gsi-base");
+		if (!resource) {
+			IPAERR(":get resource failed for gsi-base!\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->transport_mem_base = resource->start;
+		ipa_drv_res->transport_mem_size = resource_size(resource);
+		IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
+				ipa_drv_res->transport_mem_base,
+				ipa_drv_res->transport_mem_size);
+
+		/* Get IPA GSI IRQ number */
+		resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+				"gsi-irq");
+		if (!resource) {
+			IPAERR(":get resource failed for gsi-irq!\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->transport_irq = resource->start;
+		IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
+	}
+
+	/* Get IPA pipe mem start ofst */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"ipa-pipe-mem");
+	if (!resource) {
+		IPADBG(":not using pipe memory - resource nonexisting\n");
+	} else {
+		ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
+		ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
+		IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
+				ipa_drv_res->ipa_pipe_mem_start_ofst,
+				ipa_drv_res->ipa_pipe_mem_size);
+	}
+
+	/* Get IPA IRQ number */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+			"ipa-irq");
+	if (!resource) {
+		IPAERR(":get resource failed for ipa-irq!\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->ipa_irq = resource->start;
+	IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
+
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
+			&ipa_drv_res->ee);
+	if (result)
+		ipa_drv_res->ee = 0;
+
+	ipa_drv_res->apply_rg10_wa =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,use-rg10-limitation-mitigation");
+	IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
+		ipa_drv_res->apply_rg10_wa
+		? "True" : "False");
+
+	ipa_drv_res->gsi_ch20_wa =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,do-not-use-ch-gsi-20");
+	IPADBG(": GSI CH 20 WA is = %s\n",
+		ipa_drv_res->gsi_ch20_wa
+		? "Needed" : "Not needed");
+
+	return 0;
+}
+
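+/*
+ * SMMU context-bank probes: the WLAN, uC and AP context banks are probed as
+ * child platform devices of the IPA node. Each probe attaches the device to
+ * an IOMMU domain (or arm_iommu mapping), applies the S1-bypass / atomic /
+ * fast-map attributes requested through smmu_info, and programs any
+ * "qcom,additional-mapping" <iova pa size> triplets from the device tree.
+ */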
+static int ipa_smmu_wlan_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
+	int disable_htw = 1;
+	int atomic_ctx = 1;
+	int fast = 1;
+	int bypass = 1;
+	int ret;
+	u32 add_map_size;
+	const u32 *add_map;
+	int i;
+
+	IPADBG("sub pdev=%p\n", dev);
+
+	cb->dev = dev;
+	cb->iommu = iommu_domain_alloc(msm_iommu_get_bus(dev));
+	if (!cb->iommu) {
+		IPAERR("could not alloc iommu domain\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+	cb->valid = true;
+
+	if (smmu_info.disable_htw) {
+		ret = iommu_domain_set_attr(cb->iommu,
+			DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+			&disable_htw);
+		if (ret) {
+			IPAERR("couldn't disable coherent HTW\n");
+			cb->valid = false;
+			return -EIO;
+		}
+	}
+
+	if (smmu_info.s1_bypass) {
+		if (iommu_domain_set_attr(cb->iommu,
+					DOMAIN_ATTR_S1_BYPASS,
+					&bypass)) {
+			IPAERR("couldn't set bypass\n");
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU S1 BYPASS\n");
+	} else {
+		if (iommu_domain_set_attr(cb->iommu,
+					DOMAIN_ATTR_ATOMIC,
+					&atomic_ctx)) {
+			IPAERR("couldn't disable coherent HTW\n");
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU ATTR ATOMIC\n");
+
+		if (smmu_info.fast_map) {
+			if (iommu_domain_set_attr(cb->iommu,
+						DOMAIN_ATTR_FAST,
+						&fast)) {
+				IPAERR("couldn't set fast map\n");
+				cb->valid = false;
+				return -EIO;
+			}
+			IPADBG("SMMU fast map set\n");
+		}
+	}
+
+	ret = iommu_attach_device(cb->iommu, dev);
+	if (ret) {
+		IPAERR("could not attach device ret=%d\n", ret);
+		cb->valid = false;
+		return ret;
+	}
+	/* MAP ipa-uc ram */
+	add_map = of_get_property(dev->of_node,
+		"qcom,additional-mapping", &add_map_size);
+	if (add_map) {
+		/* the property is an array of <iova pa size> u32 triplets */
+		if (add_map_size % (3 * sizeof(u32))) {
+			IPAERR("wrong additional mapping format\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+
+		/* iterate over each entry of the additional mapping array */
+		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+			u32 iova = be32_to_cpu(add_map[i]);
+			u32 pa = be32_to_cpu(add_map[i + 1]);
+			u32 size = be32_to_cpu(add_map[i + 2]);
+			unsigned long iova_p;
+			phys_addr_t pa_p;
+			u32 size_p;
+
+			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+				iova_p, pa_p, size_p);
+			IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			ipa3_iommu_map(cb->iommu,
+				iova_p, pa_p, size_p,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+		}
+	}
+	return 0;
+}
+
+static int ipa_smmu_uc_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+	int disable_htw = 1;
+	int atomic_ctx = 1;
+	int bypass = 1;
+	int fast = 1;
+	int ret;
+	u32 iova_ap_mapping[2];
+
+	IPADBG("UC CB PROBE sub pdev=%p\n", dev);
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+			iova_ap_mapping, 2);
+	if (ret) {
+		IPAERR("Fail to read UC start/size iova addresses\n");
+		return ret;
+	}
+	cb->va_start = iova_ap_mapping[0];
+	cb->va_size = iova_ap_mapping[1];
+	cb->va_end = cb->va_start + cb->va_size;
+	IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
+
+	if (smmu_info.use_64_bit_dma_mask) {
+		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
+				dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
+			IPAERR("DMA set 64bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+				dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+			IPAERR("DMA set 32bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	}
+	IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
+
+	cb->dev = dev;
+	cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+			cb->va_start, cb->va_size);
+	if (IS_ERR_OR_NULL(cb->mapping)) {
+		IPADBG("Fail to create mapping\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+	IPADBG("SMMU mapping created\n");
+	cb->valid = true;
+
+	IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
+	if (smmu_info.disable_htw) {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+				 &disable_htw)) {
+			IPAERR("couldn't disable coherent HTW\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+	}
+
+	IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
+	if (smmu_info.s1_bypass) {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_S1_BYPASS,
+				&bypass)) {
+			IPAERR("couldn't set bypass\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU S1 BYPASS\n");
+	} else {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_ATOMIC,
+				&atomic_ctx)) {
+			IPAERR("couldn't set domain as atomic\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU atomic set\n");
+
+		if (smmu_info.fast_map) {
+			if (iommu_domain_set_attr(cb->mapping->domain,
+					DOMAIN_ATTR_FAST,
+					&fast)) {
+				IPAERR("couldn't set fast map\n");
+				arm_iommu_release_mapping(cb->mapping);
+				cb->valid = false;
+				return -EIO;
+			}
+			IPADBG("SMMU fast map set\n");
+		}
+	}
+
+	IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
+	ret = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (ret) {
+		IPAERR("could not attach device ret=%d\n", ret);
+		arm_iommu_release_mapping(cb->mapping);
+		cb->valid = false;
+		return ret;
+	}
+
+	cb->next_addr = cb->va_end;
+	ipa3_ctx->uc_pdev = dev;
+
+	return 0;
+}
+
+static int ipa_smmu_ap_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
+	int result;
+	int disable_htw = 1;
+	int atomic_ctx = 1;
+	int fast = 1;
+	int bypass = 1;
+	u32 iova_ap_mapping[2];
+	u32 add_map_size;
+	const u32 *add_map;
+	void *smem_addr;
+	int i;
+
+	IPADBG("AP CB probe: sub pdev=%p\n", dev);
+
+	result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+		iova_ap_mapping, 2);
+	if (result) {
+		IPAERR("Fail to read AP start/size iova addresses\n");
+		return result;
+	}
+	cb->va_start = iova_ap_mapping[0];
+	cb->va_size = iova_ap_mapping[1];
+	cb->va_end = cb->va_start + cb->va_size;
+	IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
+
+	if (smmu_info.use_64_bit_dma_mask) {
+		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
+				dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
+			IPAERR("DMA set 64bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+				dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+			IPAERR("DMA set 32bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	cb->dev = dev;
+	cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+					cb->va_start, cb->va_size);
+	if (IS_ERR_OR_NULL(cb->mapping)) {
+		IPADBG("Fail to create mapping\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+	IPADBG("SMMU mapping created\n");
+	cb->valid = true;
+
+	if (smmu_info.disable_htw) {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+				 &disable_htw)) {
+			IPAERR("couldn't disable coherent HTW\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU disable HTW\n");
+	}
+	if (smmu_info.s1_bypass) {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_S1_BYPASS,
+				&bypass)) {
+			IPAERR("couldn't set bypass\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU S1 BYPASS\n");
+	} else {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_ATOMIC,
+				&atomic_ctx)) {
+			IPAERR("couldn't set domain as atomic\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU atomic set\n");
+
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_FAST,
+				&fast)) {
+			IPAERR("couldn't set fast map\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU fast map set\n");
+	}
+
+	result = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (result) {
+		IPAERR("couldn't attach to IOMMU ret=%d\n", result);
+		cb->valid = false;
+		return result;
+	}
+
+	add_map = of_get_property(dev->of_node,
+		"qcom,additional-mapping", &add_map_size);
+	if (add_map) {
+		/* the property is an array of <iova pa size> u32 triplets */
+		if (add_map_size % (3 * sizeof(u32))) {
+			IPAERR("wrong additional mapping format\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+
+		/* iterate over each entry of the additional mapping array */
+		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+			u32 iova = be32_to_cpu(add_map[i]);
+			u32 pa = be32_to_cpu(add_map[i + 1]);
+			u32 size = be32_to_cpu(add_map[i + 2]);
+			unsigned long iova_p;
+			phys_addr_t pa_p;
+			u32 size_p;
+
+			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+				iova_p, pa_p, size_p);
+			IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			ipa3_iommu_map(cb->mapping->domain,
+				iova_p, pa_p, size_p,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+		}
+	}
+
+	/* map SMEM memory for IPA table accesses */
+	smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
+		SMEM_MODEM, 0);
+	if (smem_addr) {
+		phys_addr_t iova = smem_virt_to_phys(smem_addr);
+		phys_addr_t pa = iova;
+		unsigned long iova_p;
+		phys_addr_t pa_p;
+		u32 size_p;
+
+		IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE,
+			iova_p, pa_p, size_p);
+		IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+			iova_p, &pa_p, size_p);
+		ipa3_iommu_map(cb->mapping->domain,
+			iova_p, pa_p, size_p,
+			IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+	}
+
+
+	smmu_info.present = true;
+
+	if (!ipa3_bus_scale_table)
+		ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
+
+	/* Proceed to real initialization */
+	result = ipa3_pre_init(&ipa3_res, dev);
+	if (result) {
+		IPAERR("ipa_init failed\n");
+		arm_iommu_detach_device(cb->dev);
+		arm_iommu_release_mapping(cb->mapping);
+		cb->valid = false;
+		return result;
+	}
+
+	return result;
+}
+
+static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
+{
+	ipa3_freeze_clock_vote_and_notify_modem();
+
+	return IRQ_HANDLED;
+}
+
+static int ipa3_smp2p_probe(struct device *dev)
+{
+	struct device_node *node = dev->of_node;
+	int res;
+
+	IPADBG("node->name=%s\n", node->name);
+	if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
+		res = of_get_gpio(node, 0);
+		if (res < 0) {
+			IPADBG("of_get_gpio returned %d\n", res);
+			return res;
+		}
+
+		ipa3_ctx->smp2p_info.out_base_id = res;
+		IPADBG("smp2p out_base_id=%d\n",
+			ipa3_ctx->smp2p_info.out_base_id);
+	} else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
+		int irq;
+
+		res = of_get_gpio(node, 0);
+		if (res < 0) {
+			IPADBG("of_get_gpio returned %d\n", res);
+			return res;
+		}
+
+		ipa3_ctx->smp2p_info.in_base_id = res;
+		IPADBG("smp2p in_base_id=%d\n",
+			ipa3_ctx->smp2p_info.in_base_id);
+
+		/* register for modem clk query */
+		irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
+			IPA_GPIO_IN_QUERY_CLK_IDX);
+		if (irq < 0) {
+			IPAERR("gpio_to_irq failed %d\n", irq);
+			return -ENODEV;
+		}
+		IPADBG("smp2p irq#=%d\n", irq);
+		res = request_irq(irq,
+			(irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
+			IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
+		if (res) {
+			IPAERR("fail to register smp2p irq=%d\n", irq);
+			return -ENODEV;
+		}
+		res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
+			IPA_GPIO_IN_QUERY_CLK_IDX);
+		if (res)
+			IPAERR("failed to enable irq wake\n");
+	}
+
+	return 0;
+}
+
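+/**
+ * ipa3_plat_drv_probe() - platform driver probe entry point
+ *
+ * The main IPA node, its SMMU context-bank sub-devices and the smp2p GPIO
+ * map nodes all bind here; the compatible string is used to dispatch to the
+ * matching sub-probe. For the main node, the DTS configuration is parsed,
+ * the API controller is bound, child devices are populated and, when ARM
+ * SMMU is not in use, ipa3_pre_init() is called directly.
+ */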
+int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	int result;
+	struct device *dev = &pdev_p->dev;
+
+	IPADBG("IPA driver probing started\n");
+	IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
+		return ipa_smmu_ap_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
+		return ipa_smmu_wlan_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
+		return ipa_smmu_uc_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node,
+	    "qcom,smp2pgpio-map-ipa-1-in"))
+		return ipa3_smp2p_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node,
+	    "qcom,smp2pgpio-map-ipa-1-out"))
+		return ipa3_smp2p_probe(dev);
+
+	master_dev = dev;
+	if (!ipa3_pdev)
+		ipa3_pdev = pdev_p;
+
+	result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
+	if (result) {
+		IPAERR("IPA dts parsing failed\n");
+		return result;
+	}
+
+	result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
+	if (result) {
+		IPAERR("IPA API binding failed\n");
+		return result;
+	}
+
+	result = of_platform_populate(pdev_p->dev.of_node,
+		pdrv_match, NULL, &pdev_p->dev);
+	if (result) {
+		IPAERR("failed to populate platform\n");
+		return result;
+	}
+
+	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
+		if (of_property_read_bool(pdev_p->dev.of_node,
+		    "qcom,smmu-s1-bypass"))
+			smmu_info.s1_bypass = true;
+		if (of_property_read_bool(pdev_p->dev.of_node,
+			"qcom,smmu-fast-map"))
+			smmu_info.fast_map = true;
+		if (of_property_read_bool(pdev_p->dev.of_node,
+			"qcom,use-64-bit-dma-mask"))
+			smmu_info.use_64_bit_dma_mask = true;
+		smmu_info.arm_smmu = true;
+		pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
+			smmu_info.s1_bypass, smmu_info.fast_map);
+	} else if (of_property_read_bool(pdev_p->dev.of_node,
+				"qcom,msm-smmu")) {
+		IPAERR("Legacy IOMMU not supported\n");
+		result = -EOPNOTSUPP;
+	} else {
+		if (of_property_read_bool(pdev_p->dev.of_node,
+			"qcom,use-64-bit-dma-mask")) {
+			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
+			    dma_set_coherent_mask(&pdev_p->dev,
+			    DMA_BIT_MASK(64))) {
+				IPAERR("DMA set 64bit mask failed\n");
+				return -EOPNOTSUPP;
+			}
+		} else {
+			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
+			    dma_set_coherent_mask(&pdev_p->dev,
+			    DMA_BIT_MASK(32))) {
+				IPAERR("DMA set 32bit mask failed\n");
+				return -EOPNOTSUPP;
+			}
+		}
+
+		if (!ipa3_bus_scale_table)
+			ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
+		/* Proceed to real initialization */
+		result = ipa3_pre_init(&ipa3_res, dev);
+		if (result) {
+			IPAERR("ipa3_init failed\n");
+			return result;
+		}
+	}
+
+	return result;
+}
+
+/**
+ * ipa3_ap_suspend() - suspend callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP suspend
+ * operation is invoked, typically when the system is entering suspend.
+ *
+ * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
+ * This will postpone the suspend operation until IPA is no longer used by AP.
+ */
+int ipa3_ap_suspend(struct device *dev)
+{
+	int i;
+
+	IPADBG("Enter...\n");
+
+	/* If any tx/rx handler is in polling mode, fail to suspend */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (ipa3_ctx->ep[i].sys &&
+			atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
+			IPAERR("EP %d is in polling state, do not suspend\n",
+				i);
+			return -EAGAIN;
+		}
+	}
+
+	/* release SPS IPA resource without waiting for inactivity timer */
+	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+	ipa3_sps_release_resource(NULL);
+	IPADBG("Exit\n");
+
+	return 0;
+}
+
+/**
+ * ipa3_ap_resume() - resume callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP resume
+ * operation is invoked.
+ *
+ * Always returns 0 since resume should always succeed.
+ */
+int ipa3_ap_resume(struct device *dev)
+{
+	return 0;
+}
+
+struct ipa3_context *ipa3_get_ctx(void)
+{
+	return ipa3_ctx;
+}
+
+static void ipa_gsi_request_resource(struct work_struct *work)
+{
+	unsigned long flags;
+	int ret;
+
+	/* request IPA clocks */
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* mark transport resource as granted */
+	spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+	ipa3_ctx->transport_pm.res_granted = true;
+
+	IPADBG("IPA is ON, calling gsi driver\n");
+	ret = gsi_complete_clk_grant(ipa3_ctx->gsi_dev_hdl);
+	if (ret != GSI_STATUS_SUCCESS)
+		IPAERR("gsi_complete_clk_grant failed %d\n", ret);
+
+	spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+}
+
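+/*
+ * GSI resource-request callback. If the IPA clock vote can be taken without
+ * blocking, the request is granted inline; otherwise
+ * ipa_gsi_request_resource_work is queued to take the vote and complete the
+ * grant asynchronously via gsi_complete_clk_grant().
+ */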
+void ipa_gsi_req_res_cb(void *user_data, bool *granted)
+{
+	unsigned long flags;
+	struct ipa_active_client_logging_info log_info;
+
+	spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+
+	/* make sure no release will happen */
+	cancel_delayed_work(&ipa_gsi_release_resource_work);
+	ipa3_ctx->transport_pm.res_rel_in_prog = false;
+
+	if (ipa3_ctx->transport_pm.res_granted) {
+		*granted = true;
+	} else {
+		IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "GSI_RESOURCE");
+		if (ipa3_inc_client_enable_clks_no_block(&log_info) == 0) {
+			ipa3_ctx->transport_pm.res_granted = true;
+			*granted = true;
+		} else {
+			queue_work(ipa3_ctx->transport_power_mgmt_wq,
+				   &ipa_gsi_request_resource_work);
+			*granted = false;
+		}
+	}
+	spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+}
+
+static void ipa_gsi_release_resource(struct work_struct *work)
+{
+	unsigned long flags;
+	bool dec_clients = false;
+
+	spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+	/* check whether the active-clients vote still needs to be dropped */
+	if (ipa3_ctx->transport_pm.res_rel_in_prog) {
+		dec_clients = true;
+		ipa3_ctx->transport_pm.res_rel_in_prog = false;
+		ipa3_ctx->transport_pm.res_granted = false;
+	}
+	spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+	if (dec_clients)
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("GSI_RESOURCE");
+}
+
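+/*
+ * GSI resource-release callback. The release is deferred by
+ * IPA_TRANSPORT_PROD_TIMEOUT_MSEC; a request arriving within that window
+ * cancels the pending release (see ipa_gsi_req_res_cb) so the clock vote is
+ * kept across short idle periods.
+ */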
+int ipa_gsi_rel_res_cb(void *user_data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+
+	ipa3_ctx->transport_pm.res_rel_in_prog = true;
+	queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+			   &ipa_gsi_release_resource_work,
+			   msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
+
+	spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+	return 0;
+}
+
+static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_PER_EVT_GLOB_ERROR:
+		IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
+		IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
+		break;
+	case GSI_PER_EVT_GLOB_GP1:
+		IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GLOB_GP2:
+		IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GLOB_GP3:
+		IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GENERAL_BREAK_POINT:
+		IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
+		break;
+	case GSI_PER_EVT_GENERAL_BUS_ERROR:
+		IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
+		IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
+		IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
+		BUG();
+		break;
+	default:
+		IPAERR("Received unexpected evt: %d\n",
+			notify->evt_id);
+		BUG();
+	}
+}
+
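+/**
+ * ipa3_register_ipa_ready_cb() - register a callback to be invoked once IPA
+ * initialization completes
+ * @ipa_ready_cb: [in] callback to invoke
+ * @user_data: [in] opaque cookie passed back to the callback
+ *
+ * Returns 0 on success, -ENXIO if the IPA context does not exist yet, or
+ * -EEXIST if initialization has already completed (in which case the caller
+ * may proceed immediately).
+ */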
+int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
+{
+	struct ipa3_ready_cb_info *cb_info = NULL;
+
+	/* check whether the IPA driver context exists yet */
+	if (!ipa3_ctx) {
+		IPADBG("IPA driver hasn't been initialized yet\n");
+		return -ENXIO;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ipa_initialization_complete) {
+		mutex_unlock(&ipa3_ctx->lock);
+		IPADBG("IPA driver finished initialization already\n");
+		return -EEXIST;
+	}
+
+	cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
+	if (!cb_info) {
+		mutex_unlock(&ipa3_ctx->lock);
+		return -ENOMEM;
+	}
+
+	cb_info->ready_cb = ipa_ready_cb;
+	cb_info->user_data = user_data;
+
+	list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
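+/**
+ * ipa3_iommu_map() - iommu_map() wrapper with IOVA sanity checking
+ *
+ * Rejects (and asserts on) mappings that fall inside the AP or uC context
+ * bank IOVA ranges, which are managed by their own arm_iommu mappings; WLAN
+ * domain mappings are passed through without an overlap check.
+ */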
+int ipa3_iommu_map(struct iommu_domain *domain,
+	unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+	struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
+	struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();
+
+	IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
+	IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
+
+	/* make sure the mapping does not overlap reserved IOVA ranges */
+	if (domain == ipa3_get_smmu_domain()) {
+		if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+			IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+			ipa_assert();
+			return -EFAULT;
+		}
+	} else if (domain == ipa3_get_wlan_smmu_domain()) {
+		/* WLAN uses a one-time mapping; no overlap check needed */
+	} else if (domain == ipa3_get_uc_smmu_domain()) {
+		if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+			IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+			ipa_assert();
+			return -EFAULT;
+		}
+	} else {
+		IPAERR("Unexpected domain 0x%p\n", domain);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	return iommu_map(domain, iova, paddr, size, prot);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
new file mode 100644
index 0000000..f583a36
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -0,0 +1,1988 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/barrier.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include "ipa_i.h"
+#include "linux/msm_gsi.h"
+
+/*
+ * These values were determined empirically and show good E2E
+ * bi-directional throughput
+ */
+#define IPA_HOLB_TMR_EN 0x1
+#define IPA_HOLB_TMR_DIS 0x0
+#define IPA_HOLB_TMR_DEFAULT_VAL 0x1ff
+#define IPA_POLL_AGGR_STATE_RETRIES_NUM 3
+#define IPA_POLL_AGGR_STATE_SLEEP_MSEC 1
+
+#define IPA_PKT_FLUSH_TO_US 100
+
+#define IPA_POLL_FOR_EMPTINESS_NUM 50
+#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
+#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
+#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
+
+/* xfer_rsc_idx should be 7 bits */
+#define IPA_XFER_RSC_IDX_MAX 127
+
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+	bool *is_empty);
+
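+/*
+ * ipa3_enable_data_path() - enable the data path for an endpoint: disable
+ * the HOLB timer on consumer pipes, clear pipe suspend where applicable, and
+ * program the endpoint's resource group.
+ */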
+int ipa3_enable_data_path(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
+	struct ipa_ep_cfg_holb holb_cfg;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	int res = 0;
+	struct ipahal_reg_endp_init_rsrc_grp rsrc_grp;
+
+	IPADBG("Enabling data path\n");
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_DIS;
+		holb_cfg.tmr_val = 0;
+		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	/* Enable the pipe */
+	if (IPA_CLIENT_IS_CONS(ep->client) &&
+	    (ep->keep_ipa_awake ||
+	     ipa3_ctx->resume_on_connect[ep->client] ||
+	     !ipa3_should_pipe_be_suspended(ep->client))) {
+		memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+
+	/* Assign the resource group for pipe */
+	memset(&rsrc_grp, 0, sizeof(rsrc_grp));
+	rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client);
+	if (rsrc_grp.rsrc_grp == -1) {
+		IPAERR("invalid group for client %d\n", ep->client);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPADBG("Setting group %d for pipe %d\n",
+		rsrc_grp.rsrc_grp, clnt_hdl);
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl,
+		&rsrc_grp);
+
+	return res;
+}
+
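+/*
+ * ipa3_disable_data_path() - disable the data path for an endpoint: enable
+ * HOLB drop with a zero timer on consumer pipes, suspend the pipe, and
+ * force-close aggregation if it is still enabled on the endpoint.
+ */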
+int ipa3_disable_data_path(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
+	struct ipa_ep_cfg_holb holb_cfg;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	struct ipa_ep_cfg_aggr ep_aggr;
+	int res = 0;
+
+	IPADBG("Disabling data path\n");
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_EN;
+		holb_cfg.tmr_val = 0;
+		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	/* Suspend the pipe */
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = true;
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+
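+	/*
+	 * Give in-flight packets a short window to drain before checking
+	 * whether aggregation has to be force-closed (flush timeout taken
+	 * from IPA_PKT_FLUSH_TO_US).
+	 */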
+	udelay(IPA_PKT_FLUSH_TO_US);
+	ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, &ep_aggr);
+	if (ep_aggr.aggr_en) {
+		res = ipa3_tag_aggr_force_close(clnt_hdl);
+		if (res) {
+			IPAERR("tag process timeout, client:%d err:%d\n",
+				   clnt_hdl, res);
+			BUG();
+		}
+	}
+
+	return res;
+}
+
+static int ipa3_smmu_map_peer_bam(unsigned long dev)
+{
+	phys_addr_t base;
+	u32 size;
+	struct iommu_domain *smmu_domain;
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
+
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		if (ipa3_ctx->peer_bam_map_cnt == 0) {
+			if (sps_get_bam_addr(dev, &base, &size)) {
+				IPAERR("Fail to get addr\n");
+				return -EINVAL;
+			}
+			smmu_domain = ipa3_get_smmu_domain();
+			if (smmu_domain != NULL) {
+				if (ipa3_iommu_map(smmu_domain,
+					cb->va_end,
+					rounddown(base, PAGE_SIZE),
+					roundup(size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE),
+					IOMMU_READ | IOMMU_WRITE |
+					IOMMU_DEVICE)) {
+					IPAERR("Fail to ipa3_iommu_map\n");
+					return -EINVAL;
+				}
+			}
+
+			ipa3_ctx->peer_bam_iova = cb->va_end;
+			ipa3_ctx->peer_bam_pa = base;
+			ipa3_ctx->peer_bam_map_size = size;
+			ipa3_ctx->peer_bam_dev = dev;
+
+			IPADBG("Peer bam %lu mapped\n", dev);
+		} else {
+			WARN_ON(dev != ipa3_ctx->peer_bam_dev);
+		}
+
+		ipa3_ctx->peer_bam_map_cnt++;
+	}
+
+	return 0;
+}
+
+static int ipa3_connect_configure_sps(const struct ipa_connect_params *in,
+				     struct ipa3_ep_context *ep, int ipa_ep_idx)
+{
+	int result = -EFAULT;
+
+	/* Default Config */
+	ep->ep_hdl = sps_alloc_endpoint();
+
+	if (ipa3_smmu_map_peer_bam(in->client_bam_hdl)) {
+		IPAERR("fail to iommu map peer BAM.\n");
+		return -EFAULT;
+	}
+
+	if (ep->ep_hdl == NULL) {
+		IPAERR("SPS EP alloc failed EP.\n");
+		return -EFAULT;
+	}
+
+	result = sps_get_config(ep->ep_hdl,
+		&ep->connect);
+	if (result) {
+		IPAERR("fail to get config.\n");
+		return -EFAULT;
+	}
+
+	/* Specific Config */
+	if (IPA_CLIENT_IS_CONS(in->client)) {
+		ep->connect.mode = SPS_MODE_SRC;
+		ep->connect.destination =
+			in->client_bam_hdl;
+		ep->connect.dest_iova = ipa3_ctx->peer_bam_iova;
+		ep->connect.source = ipa3_ctx->bam_handle;
+		ep->connect.dest_pipe_index =
+			in->client_ep_idx;
+		ep->connect.src_pipe_index = ipa_ep_idx;
+	} else {
+		ep->connect.mode = SPS_MODE_DEST;
+		ep->connect.source = in->client_bam_hdl;
+		ep->connect.source_iova = ipa3_ctx->peer_bam_iova;
+		ep->connect.destination = ipa3_ctx->bam_handle;
+		ep->connect.src_pipe_index = in->client_ep_idx;
+		ep->connect.dest_pipe_index = ipa_ep_idx;
+	}
+
+	return 0;
+}
+
+static int ipa3_connect_allocate_fifo(const struct ipa_connect_params *in,
+				     struct sps_mem_buffer *mem_buff_ptr,
+				     bool *fifo_in_pipe_mem_ptr,
+				     u32 *fifo_pipe_mem_ofst_ptr,
+				     u32 fifo_size, int ipa_ep_idx)
+{
+	dma_addr_t dma_addr;
+	u32 ofst;
+	int result = -EFAULT;
+	struct iommu_domain *smmu_domain;
+
+	mem_buff_ptr->size = fifo_size;
+	if (in->pipe_mem_preferred) {
+		if (ipa3_pipe_mem_alloc(&ofst, fifo_size)) {
+			IPAERR("FIFO pipe mem alloc fail ep %u\n",
+				ipa_ep_idx);
+			mem_buff_ptr->base =
+				dma_alloc_coherent(ipa3_ctx->pdev,
+				mem_buff_ptr->size,
+				&dma_addr, GFP_KERNEL);
+		} else {
+			memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+			result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+				fifo_size, 1);
+			WARN_ON(result);
+			*fifo_in_pipe_mem_ptr = 1;
+			dma_addr = mem_buff_ptr->phys_base;
+			*fifo_pipe_mem_ofst_ptr = ofst;
+		}
+	} else {
+		mem_buff_ptr->base =
+			dma_alloc_coherent(ipa3_ctx->pdev, mem_buff_ptr->size,
+			&dma_addr, GFP_KERNEL);
+	}
+	if (ipa3_ctx->smmu_s1_bypass) {
+		mem_buff_ptr->phys_base = dma_addr;
+	} else {
+		mem_buff_ptr->iova = dma_addr;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			mem_buff_ptr->phys_base =
+				iommu_iova_to_phys(smmu_domain, dma_addr);
+		}
+	}
+	if (mem_buff_ptr->base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_connect() - low-level IPA client connect
+ * @in:	[in] input parameters from client
+ * @sps:	[out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode. These peripherals are USB and HSIC. This API expects
+ * the caller to take responsibility for adding any needed headers, routing
+ * and filtering tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
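+ *
+ * Illustrative call sequence from a peripheral driver (values below are
+ * placeholders, not requirements of this API):
+ *
+ *	struct ipa_connect_params in = { 0 };
+ *	struct ipa_sps_params sps;
+ *	u32 hdl;
+ *
+ *	in.client = IPA_CLIENT_USB_PROD;
+ *	in.desc_fifo_sz = in.data_fifo_sz = 0x800;
+ *	if (!ipa3_connect(&in, &sps, &hdl))
+ *		... peer side then uses sps.ipa_bam_hdl for sps_connect() ...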
+ */
+int ipa3_connect(const struct ipa_connect_params *in,
+		struct ipa_sps_params *sps,
+		u32 *clnt_hdl)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa3_ep_context *ep;
+	struct ipahal_reg_ep_cfg_status ep_status;
+	unsigned long base;
+	struct iommu_domain *smmu_domain;
+
+	IPADBG("connecting client\n");
+
+	if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+	    in->client >= IPA_CLIENT_MAX ||
+	    in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_EP(in->client);
+
+	ep->skip_ep_cfg = in->skip_ep_cfg;
+	ep->valid = 1;
+	ep->client = in->client;
+	ep->client_notify = in->notify;
+	ep->priv = in->priv;
+	ep->keep_ipa_awake = in->keep_ipa_awake;
+
+	result = ipa3_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+				ipa_ep_idx);
+		goto ipa_cfg_ep_fail;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		/* Setting EP status 0 */
+		memset(&ep_status, 0, sizeof(ep_status));
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	result = ipa3_connect_configure_sps(in, ep, ipa_ep_idx);
+	if (result) {
+		IPAERR("fail to configure SPS.\n");
+		goto ipa_cfg_ep_fail;
+	}
+
+	if (!ipa3_ctx->smmu_s1_bypass &&
+			(in->desc.base == NULL ||
+			 in->data.base == NULL)) {
+		IPAERR(" allocate FIFOs data_fifo=0x%p desc_fifo=0x%p.\n",
+				in->data.base, in->desc.base);
+		goto desc_mem_alloc_fail;
+	}
+
+	if (in->desc.base == NULL) {
+		result = ipa3_connect_allocate_fifo(in, &ep->connect.desc,
+						  &ep->desc_fifo_in_pipe_mem,
+						  &ep->desc_fifo_pipe_mem_ofst,
+						  in->desc_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DESC FIFO.\n");
+			goto desc_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DESC FIFO\n");
+		ep->connect.desc = in->desc;
+		ep->desc_fifo_client_allocated = 1;
+	}
+	IPADBG("Descriptor FIFO pa=%pa, size=%d\n", &ep->connect.desc.phys_base,
+	       ep->connect.desc.size);
+
+	if (in->data.base == NULL) {
+		result = ipa3_connect_allocate_fifo(in, &ep->connect.data,
+						&ep->data_fifo_in_pipe_mem,
+						&ep->data_fifo_pipe_mem_ofst,
+						in->data_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DATA FIFO.\n");
+			goto data_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DATA FIFO\n");
+		ep->connect.data = in->data;
+		ep->data_fifo_client_allocated = 1;
+	}
+	IPADBG("Data FIFO pa=%pa, size=%d\n", &ep->connect.data.phys_base,
+	       ep->connect.data.size);
+
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		ep->connect.data.iova = ep->connect.data.phys_base;
+		base = ep->connect.data.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			if (ipa3_iommu_map(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.data.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE),
+				IOMMU_READ | IOMMU_WRITE)) {
+				IPAERR("Fail to ipa3_iommu_map data FIFO\n");
+				goto iommu_map_data_fail;
+			}
+		}
+		ep->connect.desc.iova = ep->connect.desc.phys_base;
+		base = ep->connect.desc.iova;
+		if (smmu_domain != NULL) {
+			if (ipa3_iommu_map(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.desc.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE),
+				IOMMU_READ | IOMMU_WRITE)) {
+				IPAERR("Fail to ipa3_iommu_map desc FIFO\n");
+				goto iommu_map_desc_fail;
+			}
+		}
+	}
+
+	if (IPA_CLIENT_IS_USB_CONS(in->client))
+		ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD;
+	else
+		ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+	ep->connect.options = SPS_O_AUTO_ENABLE;    /* BAM-to-BAM */
+
+	result = ipa3_sps_connect_safe(ep->ep_hdl, &ep->connect, in->client);
+	if (result) {
+		IPAERR("sps_connect fails.\n");
+		goto sps_connect_fail;
+	}
+
+	sps->ipa_bam_hdl = ipa3_ctx->bam_handle;
+	sps->ipa_ep_idx = ipa_ep_idx;
+	*clnt_hdl = ipa_ep_idx;
+	memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+	memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+
+	IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
+
+	return 0;
+
+sps_connect_fail:
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		base = ep->connect.desc.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.desc.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+iommu_map_desc_fail:
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		base = ep->connect.data.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.data.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+iommu_map_data_fail:
+	if (!ep->data_fifo_client_allocated) {
+		if (!ep->data_fifo_in_pipe_mem)
+			dma_free_coherent(ipa3_ctx->pdev,
+				  ep->connect.data.size,
+				  ep->connect.data.base,
+				  ep->connect.data.phys_base);
+		else
+			ipa3_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+				  ep->connect.data.size);
+	}
+data_mem_alloc_fail:
+	if (!ep->desc_fifo_client_allocated) {
+		if (!ep->desc_fifo_in_pipe_mem)
+			dma_free_coherent(ipa3_ctx->pdev,
+				  ep->connect.desc.size,
+				  ep->connect.desc.base,
+				  ep->connect.desc.phys_base);
+		else
+			ipa3_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+				  ep->connect.desc.size);
+	}
+desc_mem_alloc_fail:
+	sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+fail:
+	return result;
+}
+
+static int ipa3_smmu_unmap_peer_bam(unsigned long dev)
+{
+	size_t len;
+	struct iommu_domain *smmu_domain;
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
+
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		WARN_ON(dev != ipa3_ctx->peer_bam_dev);
+		ipa3_ctx->peer_bam_map_cnt--;
+		if (ipa3_ctx->peer_bam_map_cnt == 0) {
+			len = roundup(ipa3_ctx->peer_bam_map_size +
+					ipa3_ctx->peer_bam_pa -
+					rounddown(ipa3_ctx->peer_bam_pa,
+						PAGE_SIZE), PAGE_SIZE);
+			smmu_domain = ipa3_get_smmu_domain();
+			if (smmu_domain != NULL) {
+				if (iommu_unmap(smmu_domain,
+					cb->va_end, len) != len) {
+					IPAERR("Fail to iommu_unmap\n");
+					return -EINVAL;
+				}
+				IPADBG("Peer bam %lu unmapped\n", dev);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take responsibility
+ * for freeing any needed headers, routing and filtering tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_disconnect(u32 clnt_hdl)
+{
+	int result;
+	struct ipa3_ep_context *ep;
+	unsigned long peer_bam;
+	unsigned long base;
+	struct iommu_domain *smmu_domain;
+	struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
+	int res;
+	enum ipa_client_type client_type;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	client_type = ipa3_get_client_mapping(clnt_hdl);
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+	/* Set Disconnect in Progress flag. */
+	spin_lock(&ipa3_ctx->disconnect_lock);
+	ep->disconnect_in_progress = true;
+	spin_unlock(&ipa3_ctx->disconnect_lock);
+
+	result = ipa3_disable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+				clnt_hdl);
+		return -EPERM;
+	}
+
+	result = sps_disconnect(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS disconnect failed.\n");
+		return -EPERM;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		peer_bam = ep->connect.destination;
+	else
+		peer_bam = ep->connect.source;
+
+	if (ipa3_smmu_unmap_peer_bam(peer_bam)) {
+		IPAERR("fail to iommu unmap peer BAM.\n");
+		return -EPERM;
+	}
+
+	if (!ep->desc_fifo_client_allocated &&
+	     ep->connect.desc.base) {
+		if (!ep->desc_fifo_in_pipe_mem)
+			dma_free_coherent(ipa3_ctx->pdev,
+					  ep->connect.desc.size,
+					  ep->connect.desc.base,
+					  ep->connect.desc.phys_base);
+		else
+			ipa3_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+					  ep->connect.desc.size);
+	}
+
+	if (!ep->data_fifo_client_allocated &&
+	     ep->connect.data.base) {
+		if (!ep->data_fifo_in_pipe_mem)
+			dma_free_coherent(ipa3_ctx->pdev,
+					  ep->connect.data.size,
+					  ep->connect.data.base,
+					  ep->connect.data.phys_base);
+		else
+			ipa3_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+					  ep->connect.data.size);
+	}
+
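+	/*
+	 * With S1 translation enabled, the desc and data FIFOs are assumed
+	 * to have been mapped on page-aligned IOVAs at connect time; unmap
+	 * the same page-rounded ranges here.
+	 */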
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		base = ep->connect.desc.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.desc.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		base = ep->connect.data.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.data.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+
+	result = sps_free_endpoint(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS de-alloc EP failed.\n");
+		return -EPERM;
+	}
+
+	ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+	/* If APPS flow control is not enabled, send a message to modem to
+	 * enable flow control honoring.
+	 */
+	if (!ipa3_ctx->tethered_flow_control && ep->qmi_request_sent) {
+		/* Disable force clear so modem honors flow control again. */
+		req.request_id = clnt_hdl;
+		res = ipa3_qmi_disable_force_clear_datapath_send(&req);
+		if (res) {
+			IPADBG("disable_force_clear_datapath failed %d\n",
+				res);
+		}
+	}
+
+	spin_lock(&ipa3_ctx->disconnect_lock);
+	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+	spin_unlock(&ipa3_ctx->disconnect_lock);
+	IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa3_reset_endpoint() - reset an endpoint from BAM perspective
+ * @clnt_hdl: [in] IPA client handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_endpoint(u32 clnt_hdl)
+{
+	int res;
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("Bad parameters.\n");
+		return -EFAULT;
+	}
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	res = sps_disconnect(ep->ep_hdl);
+	if (res) {
+		IPAERR("sps_disconnect() failed, res=%d.\n", res);
+		goto bail;
+	}
+
+	res = ipa3_sps_connect_safe(ep->ep_hdl, &ep->connect, ep->client);
+	if (res) {
+		IPAERR("sps_connect() failed, res=%d.\n", res);
+		goto bail;
+	}
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return res;
+}
+
+/**
+ * ipa3_sps_connect_safe() - connect endpoint from BAM perspective
+ * @h: [in] sps pipe handle
+ * @connect: [in] sps connect parameters
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * This function connects a BAM pipe using the SPS driver sps_connect() API
+ * and, by requesting the uC interface to reset the pipe, works around an IPA
+ * HW limitation that does not allow resetting a BAM pipe while there is
+ * traffic in the IPA TX command queue.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+			 enum ipa_client_type ipa_client)
+{
+	int res;
+
+	if (ipa3_ctx->ipa_hw_type > IPA_HW_v2_5 ||
+			ipa3_ctx->skip_uc_pipe_reset) {
+		IPADBG("uC pipe reset is not required\n");
+	} else {
+		res = ipa3_uc_reset_pipe(ipa_client);
+		if (res)
+			return res;
+	}
+	return sps_connect(h, connect);
+}
+
+static void ipa_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	if (notify) {
+		switch (notify->evt_id) {
+		case GSI_CHAN_INVALID_TRE_ERR:
+			IPAERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
+			break;
+		case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+			IPAERR("Received GSI_CHAN_NON_ALLOC_EVT_ACCESS_ERR\n");
+			break;
+		case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+			IPAERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+			break;
+		case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+			IPAERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+			break;
+		case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+			IPAERR("Received GSI_CHAN_UNSUPP_INTER_EE_OP_ERR\n");
+			break;
+		case GSI_CHAN_HWO_1_ERR:
+			IPAERR("Received GSI_CHAN_HWO_1_ERR\n");
+			break;
+		default:
+			IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+		}
+		BUG();
+	}
+}
+
+static void ipa_xfer_cb(struct gsi_chan_xfer_notify *notify)
+{
+}
+
+static int ipa3_reconfigure_channel_to_gpi(struct ipa3_ep_context *ep,
+	struct gsi_chan_props *orig_chan_props,
+	struct ipa_mem_buffer *chan_dma)
+{
+	struct gsi_chan_props chan_props;
+	enum gsi_status gsi_res;
+	dma_addr_t chan_dma_addr;
+	int result;
+
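+	/*
+	 * Build a minimal dummy GPI channel on top of the existing channel
+	 * ID and event ring: a two-element ring is enough to queue the
+	 * single 1-byte transfer used to flush the open aggregation frame.
+	 */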
+	/* Set up channel properties */
+	memset(&chan_props, 0, sizeof(struct gsi_chan_props));
+	chan_props.prot = GSI_CHAN_PROT_GPI;
+	chan_props.dir = GSI_CHAN_DIR_FROM_GSI;
+	chan_props.ch_id = orig_chan_props->ch_id;
+	chan_props.evt_ring_hdl = orig_chan_props->evt_ring_hdl;
+	chan_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	chan_props.ring_len = 2 * GSI_CHAN_RE_SIZE_16B;
+	chan_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, chan_props.ring_len,
+		&chan_dma_addr, 0);
+	if (!chan_props.ring_base_vaddr) {
+		IPAERR("Error allocating dummy GPI channel ring\n");
+		return -ENOMEM;
+	}
+	chan_props.ring_base_addr = chan_dma_addr;
+	chan_dma->base = chan_props.ring_base_vaddr;
+	chan_dma->phys_base = chan_props.ring_base_addr;
+	chan_dma->size = chan_props.ring_len;
+	chan_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	chan_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	chan_props.low_weight = 1;
+	chan_props.chan_user_data = NULL;
+	chan_props.err_cb = ipa_chan_err_cb;
+	chan_props.xfer_cb = ipa_xfer_cb;
+
+	gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, &chan_props, NULL);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error setting channel properties\n");
+		result = -EFAULT;
+		goto set_chan_cfg_fail;
+	}
+
+	return 0;
+
+set_chan_cfg_fail:
+	dma_free_coherent(ipa3_ctx->pdev, chan_dma->size,
+		chan_dma->base, chan_dma->phys_base);
+	return result;
+
+}
+
+static int ipa3_restore_channel_properties(struct ipa3_ep_context *ep,
+	struct gsi_chan_props *chan_props,
+	union gsi_channel_scratch *chan_scratch)
+{
+	enum gsi_status gsi_res;
+
+	gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, chan_props,
+		chan_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error restoring channel properties\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
+	struct ipa3_ep_context *ep)
+{
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+	struct gsi_chan_props orig_chan_props;
+	union gsi_channel_scratch orig_chan_scratch;
+	struct ipa_mem_buffer chan_dma;
+	void *buff;
+	dma_addr_t dma_addr;
+	struct gsi_xfer_elem xfer_elem;
+	int i;
+	int aggr_active_bitmap = 0;
+
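+	/*
+	 * WA sequence: force-close the aggregation frame, reset the channel,
+	 * temporarily reconfigure it as a dummy GPI channel, push a 1-byte
+	 * transfer to flush the frame, poll until aggregation goes idle,
+	 * then stop/reset the channel and restore its original properties.
+	 */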
+	IPADBG("Applying reset channel with open aggregation frame WA\n");
+	ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
+
+	/* Reset channel */
+	gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting channel: %d\n", gsi_res);
+		return -EFAULT;
+	}
+
+	/* Reconfigure channel to dummy GPI channel */
+	memset(&orig_chan_props, 0, sizeof(struct gsi_chan_props));
+	memset(&orig_chan_scratch, 0, sizeof(union gsi_channel_scratch));
+	gsi_res = gsi_get_channel_cfg(ep->gsi_chan_hdl, &orig_chan_props,
+		&orig_chan_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error getting channel properties: %d\n", gsi_res);
+		return -EFAULT;
+	}
+	memset(&chan_dma, 0, sizeof(struct ipa_mem_buffer));
+	result = ipa3_reconfigure_channel_to_gpi(ep, &orig_chan_props,
+		&chan_dma);
+	if (result)
+		return -EFAULT;
+
+	/* Start channel and put 1 Byte descriptor on it */
+	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error starting channel: %d\n", gsi_res);
+		goto start_chan_fail;
+	}
+
+	memset(&xfer_elem, 0, sizeof(struct gsi_xfer_elem));
+	buff = dma_alloc_coherent(ipa3_ctx->pdev, 1, &dma_addr,
+		GFP_KERNEL);
+	if (!buff) {
+		IPAERR("Error allocating 1B dummy buffer\n");
+		result = -ENOMEM;
+		ipa3_stop_gsi_channel(clnt_hdl);
+		goto start_chan_fail;
+	}
+	xfer_elem.addr = dma_addr;
+	xfer_elem.len = 1;
+	xfer_elem.flags = GSI_XFER_FLAG_EOT;
+	xfer_elem.type = GSI_XFER_ELEM_DATA;
+
+	gsi_res = gsi_queue_xfer(ep->gsi_chan_hdl, 1, &xfer_elem,
+		true);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error queueing xfer: %d\n", gsi_res);
+		result = -EFAULT;
+		goto queue_xfer_fail;
+	}
+
+	/* Wait for aggregation frame to be closed and stop channel */
+	for (i = 0; i < IPA_POLL_AGGR_STATE_RETRIES_NUM; i++) {
+		aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+		if (!(aggr_active_bitmap & (1 << clnt_hdl)))
+			break;
+		msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+	}
+
+	if (aggr_active_bitmap & (1 << clnt_hdl)) {
+		IPAERR("Failed closing aggr frame for client: %d\n",
+			clnt_hdl);
+		BUG();
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr);
+
+	result = ipa3_stop_gsi_channel(clnt_hdl);
+	if (result) {
+		IPAERR("Error stopping channel: %d\n", result);
+		goto start_chan_fail;
+	}
+
+	/* Reset channel */
+	gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting channel: %d\n", gsi_res);
+		result = -EFAULT;
+		goto start_chan_fail;
+	}
+
+	/*
+	 * Sleep for 1ms, as required by the H/W-verified sequence
+	 * for resetting a GSI channel
+	 */
+	msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+
+	/* Restore the channel's original properties */
+	result = ipa3_restore_channel_properties(ep, &orig_chan_props,
+		&orig_chan_scratch);
+	if (result)
+		goto restore_props_fail;
+	dma_free_coherent(ipa3_ctx->pdev, chan_dma.size,
+		chan_dma.base, chan_dma.phys_base);
+
+	return 0;
+
+queue_xfer_fail:
+	ipa3_stop_gsi_channel(clnt_hdl);
+	dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr);
+start_chan_fail:
+	ipa3_restore_channel_properties(ep, &orig_chan_props,
+		&orig_chan_scratch);
+restore_props_fail:
+	dma_free_coherent(ipa3_ctx->pdev, chan_dma.size,
+		chan_dma.base, chan_dma.phys_base);
+	return result;
+}
+
+int ipa3_reset_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+	int aggr_active_bitmap = 0;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	/*
+	 * Check for open aggregation frame on Consumer EP -
+	 * reset with open aggregation frame WA
+	 */
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+		if (aggr_active_bitmap & (1 << clnt_hdl)) {
+			result = ipa3_reset_with_open_aggr_frame_wa(clnt_hdl,
+				ep);
+			if (result)
+				goto reset_chan_fail;
+			goto finish_reset;
+		}
+	}
+
+	/*
+	 * Reset the channel.
+	 * If the reset is called right after a stop, wait 1ms first.
+	 */
+	msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+	gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting channel: %d\n", gsi_res);
+		result = -EFAULT;
+		goto reset_chan_fail;
+	}
+
+finish_reset:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+reset_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	/* Reset event ring */
+	gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting event: %d\n", gsi_res);
+		result = -EFAULT;
+		goto reset_evt_fail;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+reset_evt_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params)
+{
+	return params->client < IPA_CLIENT_MAX;
+}
+
+int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map)
+{
+	struct iommu_domain *smmu_domain;
+	int res;
+
+	if (ipa3_ctx->smmu_s1_bypass)
+		return 0;
+
+	smmu_domain = ipa3_get_smmu_domain();
+	if (!smmu_domain) {
+		IPAERR("invalid smmu domain\n");
+		return -EINVAL;
+	}
+
+	if (map) {
+		res = ipa3_iommu_map(smmu_domain, phys_addr, phys_addr,
+			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+	} else {
+		res = iommu_unmap(smmu_domain, phys_addr, PAGE_SIZE);
+		res = (res != PAGE_SIZE);
+	}
+	if (res) {
+		IPAERR("Fail to %s reg 0x%pa\n", map ? "map" : "unmap",
+			&phys_addr);
+		return -EINVAL;
+	}
+
+	IPADBG("Peer reg 0x%pa %s\n", &phys_addr, map ? "map" : "unmap");
+
+	return 0;
+}
+
+int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr, u32 size, bool map)
+{
+	struct iommu_domain *smmu_domain;
+	int res;
+
+	if (ipa3_ctx->smmu_s1_bypass)
+		return 0;
+
+	smmu_domain = ipa3_get_smmu_domain();
+	if (!smmu_domain) {
+		IPAERR("invalid smmu domain\n");
+		return -EINVAL;
+	}
+
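+	/*
+	 * Map/unmap at page granularity: the IOVA and PA are rounded down
+	 * to a page boundary and the size is rounded up, so the whole
+	 * buffer is covered even when it is not page aligned.
+	 */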
+	if (map) {
+		res = ipa3_iommu_map(smmu_domain,
+			rounddown(iova, PAGE_SIZE),
+			rounddown(phys_addr, PAGE_SIZE),
+			roundup(size + iova - rounddown(iova, PAGE_SIZE),
+			PAGE_SIZE),
+			IOMMU_READ | IOMMU_WRITE);
+		if (res) {
+			IPAERR("Fail to map 0x%llx->0x%pa\n", iova, &phys_addr);
+			return -EINVAL;
+		}
+	} else {
+		res = iommu_unmap(smmu_domain,
+			rounddown(iova, PAGE_SIZE),
+			roundup(size + iova - rounddown(iova, PAGE_SIZE),
+			PAGE_SIZE));
+		if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE),
+			PAGE_SIZE)) {
+			IPAERR("Fail to unmap 0x%llx->0x%pa\n",
+				iova, &phys_addr);
+			return -EINVAL;
+		}
+	}
+
+	IPADBG("Peer buff %s 0x%llx->0x%pa\n", map ? "map" : "unmap",
+		iova, &phys_addr);
+
+	return 0;
+}
+
+
+int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
+			     struct ipa_req_chan_out_params *out_params)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa3_ep_context *ep;
+	struct ipahal_reg_ep_cfg_status ep_status;
+	unsigned long gsi_dev_hdl;
+	enum gsi_status gsi_res;
+	struct ipa_gsi_ep_config gsi_ep_cfg;
+	struct ipa_gsi_ep_config *gsi_ep_cfg_ptr = &gsi_ep_cfg;
+
+	IPADBG("entry\n");
+	if (params == NULL || out_params == NULL ||
+		!ipa3_is_legal_params(params)) {
+		IPAERR("bad parameters\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(params->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ep->skip_ep_cfg = params->skip_ep_cfg;
+	ep->valid = 1;
+	ep->client = params->client;
+	ep->client_notify = params->notify;
+	ep->priv = params->priv;
+	ep->keep_ipa_awake = params->keep_ipa_awake;
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &params->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		/* Setting EP status 0 */
+		memset(&ep_status, 0, sizeof(ep_status));
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	out_params->clnt_hdl = ipa_ep_idx;
+
+	result = ipa3_enable_data_path(out_params->clnt_hdl);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+				out_params->clnt_hdl);
+		goto ipa_cfg_ep_fail;
+	}
+
+	gsi_dev_hdl = ipa3_ctx->gsi_dev_hdl;
+	gsi_res = gsi_alloc_evt_ring(&params->evt_ring_params, gsi_dev_hdl,
+		&ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error allocating event ring: %d\n", gsi_res);
+		result = -EFAULT;
+		goto ipa_cfg_ep_fail;
+	}
+
+	gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
+		params->evt_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing event ring scratch: %d\n", gsi_res);
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+
+	gsi_ep_cfg_ptr = ipa_get_gsi_ep_info(ipa_ep_idx);
+	if (!gsi_ep_cfg_ptr) {
+		IPAERR("Error getting GSI EP info: ep_idx=%d\n", ipa_ep_idx);
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+	params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num;
+	gsi_res = gsi_alloc_channel(&params->chan_params, gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error allocating channel: %d, chan_id: %d\n", gsi_res,
+			params->chan_params.ch_id);
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+
+	memcpy(&ep->chan_scratch, &params->chan_scratch,
+		sizeof(union __packed gsi_channel_scratch));
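+	/*
+	 * Derive the xDCI max outstanding TREs from the EP configuration;
+	 * this assumes ipa_if_tlv is the EP's TLV FIFO depth in ring
+	 * elements of re_size bytes each.
+	 */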
+	ep->chan_scratch.xdci.max_outstanding_tre =
+		params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv;
+	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+		params->chan_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing channel scratch: %d\n", gsi_res);
+		result = -EFAULT;
+		goto write_chan_scratch_fail;
+	}
+
+	gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl,
+		&out_params->db_reg_phs_addr_lsb,
+		&out_params->db_reg_phs_addr_msb);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error querying channel DB registers addresses: %d\n",
+			gsi_res);
+		result = -EFAULT;
+		goto write_chan_scratch_fail;
+	}
+
+	ep->gsi_mem_info.evt_ring_len = params->evt_ring_params.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr =
+		params->evt_ring_params.ring_base_addr;
+	ep->gsi_mem_info.evt_ring_base_vaddr =
+		params->evt_ring_params.ring_base_vaddr;
+	ep->gsi_mem_info.chan_ring_len = params->chan_params.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		params->chan_params.ring_base_addr;
+	ep->gsi_mem_info.chan_ring_base_vaddr =
+		params->chan_params.ring_base_vaddr;
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx);
+	IPADBG("exit\n");
+
+	return 0;
+
+write_chan_scratch_fail:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+write_evt_scratch_fail:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+ipa_cfg_ep_fail:
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+fail:
+	return result;
+}
+
+int ipa3_set_usb_max_packet_size(
+	enum ipa_usb_max_usb_packet_size usb_max_packet_size)
+{
+	struct gsi_device_scratch dev_scratch;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch));
+	dev_scratch.mhi_base_chan_idx_valid = false;
+	dev_scratch.max_usb_pkt_size_valid = true;
+	dev_scratch.max_usb_pkt_size = usb_max_packet_size;
+
+	gsi_res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
+		&dev_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing device scratch: %d\n", gsi_res);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+		xferrscidx > IPA_XFER_RSC_IDX_MAX) {
+		IPAERR("Bad parameters.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
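+	/*
+	 * If the caller supplied an xfer resource index, publish it via the
+	 * GSI channel scratch area before starting the channel.
+	 */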
+	if (xferrscidx_valid) {
+		ep->chan_scratch.xdci.xferrscidx = xferrscidx;
+		gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+			ep->chan_scratch);
+		if (gsi_res != GSI_STATUS_SUCCESS) {
+			IPAERR("Error writing channel scratch: %d\n", gsi_res);
+			goto write_chan_scratch_fail;
+		}
+	}
+	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error starting channel: %d\n", gsi_res);
+		goto write_chan_scratch_fail;
+	}
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+write_chan_scratch_fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+	unsigned long chan_hdl)
+{
+	enum gsi_status gsi_res;
+
+	memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
+	gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error querying channel info: %d\n", gsi_res);
+		return -EFAULT;
+	}
+	if (!gsi_chan_info->evt_valid) {
+		IPAERR("Event info invalid\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static bool ipa3_is_xdci_channel_with_given_info_empty(
+	struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info)
+{
+	bool is_empty = false;
+
+	if (!IPA_CLIENT_IS_CONS(ep->client)) {
+		/* For UL channel: chan.RP == chan.WP */
+		is_empty = (chan_info->rp == chan_info->wp);
+	} else {
+		/* For DL channel: */
+		if (chan_info->wp !=
+		    (ep->gsi_mem_info.chan_ring_base_addr +
+		     ep->gsi_mem_info.chan_ring_len -
+		     GSI_CHAN_RE_SIZE_16B)) {
+			/*  if chan.WP != LINK TRB: chan.WP == evt.RP */
+			is_empty = (chan_info->wp == chan_info->evt_rp);
+		} else {
+			/*
+			 * if chan.WP == LINK TRB: chan.base_xfer_ring_addr
+			 * == evt.RP
+			 */
+			is_empty = (ep->gsi_mem_info.chan_ring_base_addr ==
+				chan_info->evt_rp);
+		}
+	}
+
+	return is_empty;
+}
+
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+	bool *is_empty)
+{
+	struct gsi_chan_info chan_info;
+	int res;
+
+	if (!ep || !is_empty || !ep->valid) {
+		IPAERR("Input Error\n");
+		return -EFAULT;
+	}
+
+	res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
+	if (res) {
+		IPAERR("Failed to get GSI channel info\n");
+		return -EFAULT;
+	}
+
+	*is_empty = ipa3_is_xdci_channel_with_given_info_empty(ep, &chan_info);
+
+	return 0;
+}
+
+static int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
+	u32 source_pipe_bitmask)
+{
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+	int result;
+
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	req.source_pipe_bitmask = source_pipe_bitmask;
+	if (throttle_source) {
+		req.throttle_source_valid = 1;
+		req.throttle_source = 1;
+	}
+	result = ipa3_qmi_enable_force_clear_datapath_send(&req);
+	if (result) {
+		IPAERR("ipa3_qmi_enable_force_clear_datapath_send failed %d\n",
+			result);
+		return result;
+	}
+
+	return 0;
+}
+
+static int ipa3_disable_force_clear(u32 request_id)
+{
+	struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+	int result;
+
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	result = ipa3_qmi_disable_force_clear_datapath_send(&req);
+	if (result) {
+		IPAERR("ipa3_qmi_disable_force_clear_datapath_send failed %d\n",
+			result);
+		return result;
+	}
+
+	return 0;
+}
+
+/* Clocks should be voted before invoking this function */
+static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc)
+{
+	int res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+		!stop_in_proc) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	res = ipa3_stop_gsi_channel(clnt_hdl);
+	if (res != 0 && res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPAERR("xDCI stop channel failed res=%d\n", res);
+		return -EFAULT;
+	}
+
+	if (res)
+		*stop_in_proc = true;
+	else
+		*stop_in_proc = false;
+
+	IPADBG("xDCI channel is %s (result=%d)\n",
+		res ? "STOP_IN_PROC/TimeOut" : "STOP", res);
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+/* Clocks should be voted before invoking this function */
+static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
+	bool *stop_in_proc)
+{
+	unsigned long jiffies_start;
+	unsigned long jiffies_timeout =
+		msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC);
+	int res;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+		!stop_in_proc) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
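+	/*
+	 * Keep issuing stop requests until the channel actually stops or
+	 * the overall timeout expires; a channel that is still stopping is
+	 * reported back through *stop_in_proc.
+	 */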
+	jiffies_start = jiffies;
+	while (1) {
+		res = ipa3_xdci_stop_gsi_channel(clnt_hdl,
+			stop_in_proc);
+		if (res) {
+			IPAERR("failed to stop xDCI channel hdl=%d\n",
+				clnt_hdl);
+			return res;
+		}
+
+		if (!*stop_in_proc) {
+			IPADBG("xDCI channel STOP hdl=%d\n", clnt_hdl);
+			return res;
+		}
+
+		/*
+		 * Give the previous stop request a chance to complete
+		 * before retrying
+		 */
+		udelay(IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC);
+
+		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
+			IPADBG("timeout waiting for xDCI channel emptiness\n");
+			return res;
+		}
+	}
+}
+
+/* Clocks should be voted for before invoking this function */
+static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
+		u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl)
+{
+	int result;
+	bool is_empty = false;
+	int i;
+	bool stop_in_proc;
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* first try to stop the channel */
+	result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+			&stop_in_proc);
+	if (result) {
+		IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		goto exit;
+	}
+	if (!stop_in_proc)
+		goto exit;
+
+	/* if stop_in_proc, let's wait for emptiness */
+	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+		result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+		if (result)
+			goto exit;
+		if (is_empty)
+			break;
+		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+	}
+	/* If the channel is empty now, try to stop it again */
+	if (is_empty) {
+		result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+			&stop_in_proc);
+		if (result) {
+			IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+				clnt_hdl, ep->client);
+			goto exit;
+		}
+		if (!stop_in_proc)
+			goto exit;
+	}
+	/* if still stop_in_proc or not empty, activate force clear */
+	if (should_force_clear) {
+		result = ipa3_enable_force_clear(qmi_req_id, false,
+			source_pipe_bitmask);
+		if (result)
+			goto exit;
+	}
+	/* with force clear, wait for emptiness */
+	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+		result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+		if (result)
+			goto disable_force_clear_and_exit;
+		if (is_empty)
+			break;
+
+		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+	}
+	/* try to stop for the last time */
+	result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+		&stop_in_proc);
+	if (result) {
+		IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		goto disable_force_clear_and_exit;
+	}
+	result = stop_in_proc ? -EFAULT : 0;
+
+disable_force_clear_and_exit:
+	if (should_force_clear)
+		ipa3_disable_force_clear(qmi_req_id);
+exit:
+	return result;
+}
+
+int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
+{
+	struct ipa3_ep_context *ep;
+	int result;
+	u32 source_pipe_bitmask = 0;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_disable_data_path(clnt_hdl);
+
+	if (!IPA_CLIENT_IS_CONS(ep->client)) {
+		IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		source_pipe_bitmask = 1 <<
+			ipa3_get_ep_mapping(ep->client);
+		result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+			source_pipe_bitmask, should_force_clear, clnt_hdl);
+		if (result) {
+			IPAERR("Fail to stop UL channel with data drain\n");
+			WARN_ON(1);
+			goto stop_chan_fail;
+		}
+	} else {
+		IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		result = ipa3_stop_gsi_channel(clnt_hdl);
+		if (result) {
+			IPAERR("Error stopping channel (CONS client): %d\n",
+				result);
+			goto stop_chan_fail;
+		}
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+stop_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_release_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error deallocating channel: %d\n", gsi_res);
+		goto dealloc_chan_fail;
+	}
+
+	gsi_res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error deallocating event: %d\n", gsi_res);
+		goto dealloc_chan_fail;
+	}
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client))
+		ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+
+	IPADBG("exit\n");
+	return 0;
+
+dealloc_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	bool should_force_clear, u32 qmi_req_id, bool is_dpl)
+{
+	struct ipa3_ep_context *ul_ep, *dl_ep;
+	int result = -EFAULT;
+	u32 source_pipe_bitmask = 0;
+	bool dl_data_pending = true;
+	bool ul_data_pending = true;
+	int i;
+	bool is_empty = false;
+	struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info;
+	int aggr_active_bitmap = 0;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	/* In case of DPL, dl is the DPL channel/client */
+
+	IPADBG("entry\n");
+	if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+		(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
+	if (!is_dpl)
+		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info,
+		dl_ep->gsi_chan_hdl);
+	if (result)
+		goto disable_clk_and_exit;
+
+	if (!is_dpl) {
+		result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
+			ul_ep->gsi_chan_hdl);
+		if (result)
+			goto disable_clk_and_exit;
+	}
+
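+	/*
+	 * Poll both the DL/DPL channel and, for non-DPL, the UL channel
+	 * until they are empty; suspend is aborted below if DL/DPL data is
+	 * still pending.
+	 */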
+	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+		if (!dl_data_pending && !ul_data_pending)
+			break;
+		result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+		if (result)
+			goto disable_clk_and_exit;
+		if (!is_empty) {
+			dl_data_pending = true;
+			break;
+		}
+		dl_data_pending = false;
+		if (!is_dpl) {
+			result = ipa3_is_xdci_channel_empty(ul_ep, &is_empty);
+			if (result)
+				goto disable_clk_and_exit;
+			ul_data_pending = !is_empty;
+		} else {
+			ul_data_pending = false;
+		}
+
+		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+	}
+
+	if (!dl_data_pending) {
+		aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+		if (aggr_active_bitmap & (1 << dl_clnt_hdl)) {
+			IPADBG("DL/DPL data pending due to open aggr. frame\n");
+			dl_data_pending = true;
+		}
+	}
+	if (dl_data_pending) {
+		IPAERR("DL/DPL data pending, can't suspend\n");
+		result = -EFAULT;
+		goto disable_clk_and_exit;
+	}
+
+	/* Suspend the DL/DPL EP */
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_cfg_ctrl.ipa_ep_suspend = true;
+	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+
+	/*
+	 * Check if the DL/DPL channel is empty again; data could have
+	 * entered the channel before its IPA EP was suspended
+	 */
+	result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+	if (result)
+		goto unsuspend_dl_and_exit;
+	if (!is_empty) {
+		IPAERR("DL/DPL data pending, can't suspend\n");
+		result = -EFAULT;
+		goto unsuspend_dl_and_exit;
+	}
+
+	/* STOP UL channel */
+	if (!is_dpl) {
+		source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
+		result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+			source_pipe_bitmask, should_force_clear, ul_clnt_hdl);
+		if (result) {
+			IPAERR("Error stopping UL channel: result = %d\n",
+				result);
+			goto unsuspend_dl_and_exit;
+		}
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+unsuspend_dl_and_exit:
+	/* Unsuspend the DL EP */
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_cfg_ctrl.ipa_ep_suspend = false;
+	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+disable_clk_and_exit:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+	return result;
+}
+
+int ipa3_start_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameters.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error starting channel: %d\n", gsi_res);
+		goto start_chan_fail;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+start_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
+{
+	struct ipa3_ep_context *ul_ep, *dl_ep;
+	enum gsi_status gsi_res;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	/* In case of DPL, dl is the DPL channel/client */
+
+	IPADBG("entry\n");
+	if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+		(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
+	if (!is_dpl)
+		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	/* Unsuspend the DL/DPL EP */
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_cfg_ctrl.ipa_ep_suspend = false;
+	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+
+	/* Start UL channel */
+	if (!is_dpl) {
+		gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
+		if (gsi_res != GSI_STATUS_SUCCESS)
+			IPAERR("Error starting UL channel: %d\n", gsi_res);
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+/**
+ * ipa3_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before
+ * client disconnect.
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to remove
+ * ep delay on an IPA consumer pipe before disconnect in BAM-BAM mode. This
+ * API expects the caller to take responsibility for freeing any needed
+ * headers, routing and filtering tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_clear_endpoint_delay(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0};
+	int res;
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ipa3_ctx->tethered_flow_control) {
+		IPADBG("APPS flow control is not enabled\n");
+		/* Send a message to modem to disable flow control honoring. */
+		req.request_id = clnt_hdl;
+		req.source_pipe_bitmask = 1 << clnt_hdl;
+		res = ipa3_qmi_enable_force_clear_datapath_send(&req);
+		if (res) {
+			IPADBG("enable_force_clear_datapath failed %d\n",
+				res);
+		}
+		ep->qmi_request_sent = true;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	/* Set disconnect in progress flag so further flow control events are
+	 * not honored.
+	 */
+	spin_lock(&ipa3_ctx->disconnect_lock);
+	ep->disconnect_in_progress = true;
+	spin_unlock(&ipa3_ctx->disconnect_lock);
+
+	/* If flow is disabled at this point, restore the ep state. */
+	ep_ctrl.ipa_ep_delay = false;
+	ep_ctrl.ipa_ep_suspend = false;
+	ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
+
+	return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
new file mode 100644
index 0000000..2368797
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -0,0 +1,2143 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_MAX_MSG_LEN 4096
+#define IPA_DBG_MAX_RULE_IN_TBL 128
+#define IPA_DBG_ACTIVE_CLIENT_BUF_SIZE ((IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN \
+	* IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) + IPA_MAX_MSG_LEN)
+
+#define IPA_DUMP_STATUS_FIELD(f) \
+	pr_err(#f "=0x%x\n", status->f)
+
+const char *ipa3_excp_name[] = {
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
+};
+
+const char *ipa3_event_name[] = {
+	__stringify(WLAN_CLIENT_CONNECT),
+	__stringify(WLAN_CLIENT_DISCONNECT),
+	__stringify(WLAN_CLIENT_POWER_SAVE_MODE),
+	__stringify(WLAN_CLIENT_NORMAL_MODE),
+	__stringify(SW_ROUTING_ENABLE),
+	__stringify(SW_ROUTING_DISABLE),
+	__stringify(WLAN_AP_CONNECT),
+	__stringify(WLAN_AP_DISCONNECT),
+	__stringify(WLAN_STA_CONNECT),
+	__stringify(WLAN_STA_DISCONNECT),
+	__stringify(WLAN_CLIENT_CONNECT_EX),
+	__stringify(WLAN_SWITCH_TO_SCC),
+	__stringify(WLAN_SWITCH_TO_MCC),
+	__stringify(WLAN_WDI_ENABLE),
+	__stringify(WLAN_WDI_DISABLE),
+	__stringify(WAN_UPSTREAM_ROUTE_ADD),
+	__stringify(WAN_UPSTREAM_ROUTE_DEL),
+	__stringify(WAN_EMBMS_CONNECT),
+	__stringify(WAN_XLAT_CONNECT),
+	__stringify(ECM_CONNECT),
+	__stringify(ECM_DISCONNECT),
+	__stringify(IPA_TETHERING_STATS_UPDATE_STATS),
+	__stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS),
+};
+
+const char *ipa3_hdr_l2_type_name[] = {
+	__stringify(IPA_HDR_L2_NONE),
+	__stringify(IPA_HDR_L2_ETHERNET_II),
+	__stringify(IPA_HDR_L2_802_3),
+};
+
+const char *ipa3_hdr_proc_type_name[] = {
+	__stringify(IPA_HDR_PROC_NONE),
+	__stringify(IPA_HDR_PROC_ETHII_TO_ETHII),
+	__stringify(IPA_HDR_PROC_ETHII_TO_802_3),
+	__stringify(IPA_HDR_PROC_802_3_TO_ETHII),
+	__stringify(IPA_HDR_PROC_802_3_TO_802_3),
+};
+
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_keep_awake;
+static struct dentry *dfile_ep_holb;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_proc_ctx;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip4_rt_hw;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip6_rt_hw;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip4_flt_hw;
+static struct dentry *dfile_ip6_flt;
+static struct dentry *dfile_ip6_flt_hw;
+static struct dentry *dfile_stats;
+static struct dentry *dfile_wstats;
+static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
+static struct dentry *dfile_dbg_cnt;
+static struct dentry *dfile_msg;
+static struct dentry *dfile_ip4_nat;
+static struct dentry *dfile_rm_stats;
+static struct dentry *dfile_status_stats;
+static struct dentry *dfile_active_clients;
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static char *active_clients_buf;
+
+static s8 ep_reg_idx;
+
+
+static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	struct ipahal_reg_shared_mem_size smem_sz;
+
+	memset(&smem_sz, 0, sizeof(smem_sz));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_VERSION=0x%x\n"
+			"IPA_COMP_HW_VERSION=0x%x\n"
+			"IPA_ROUTE=0x%x\n"
+			"IPA_SHARED_MEM_RESTRICTED=0x%x\n"
+			"IPA_SHARED_MEM_SIZE=0x%x\n",
+			ipahal_read_reg(IPA_VERSION),
+			ipahal_read_reg(IPA_COMP_HW_VERSION),
+			ipahal_read_reg(IPA_ROUTE),
+			smem_sz.shared_mem_baddr,
+			smem_sz.shared_mem_sz);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_write_ep_holb(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ipa_ep_cfg_holb holb;
+	u32 en;
+	u32 tmr_val;
+	u32 ep_idx;
+	unsigned long missing;
+	char *sptr, *token;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &ep_idx))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &en))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &tmr_val))
+		return -EINVAL;
+
+	holb.en = en;
+	holb.tmr_val = tmr_val;
+
+	ipa3_cfg_ep_holb(ep_idx, &holb);
+
+	return count;
+}
+
+static ssize_t ipa3_write_ep_reg(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("bad pipe specified %u\n", option);
+		return count;
+	}
+
+	ep_reg_idx = option;
+
+	return count;
+}
+
+/**
+ * _ipa_read_ep_reg_v3_0() - Reads and prints endpoint configuration registers
+ *
+ * Returns the number of characters printed
+ */
+int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe)
+{
+	return scnprintf(
+		dbg_buff, IPA_MAX_MSG_LEN,
+		"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
+		"IPA_ENDP_INIT_MODE_%u=0x%x\n"
+		"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_ROUTE_%u=0x%x\n"
+		"IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
+		"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_CFG_%u=0x%x\n",
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_ROUTE_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
+}
+
+static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int start_idx;
+	int end_idx;
+	int size = 0;
+	int ret;
+	loff_t pos;
+
+	/* negative ep_reg_idx means all registers */
+	if (ep_reg_idx < 0) {
+		start_idx = 0;
+		end_idx = ipa3_ctx->ipa_num_pipes;
+	} else {
+		start_idx = ep_reg_idx;
+		end_idx = start_idx + 1;
+	}
+	pos = *ppos;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
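+	/*
+	 * Dump one pipe per iteration; *ppos is rewound to the caller's
+	 * original offset before each simple_read_from_buffer() call so the
+	 * per-pipe chunks are concatenated in the user buffer.
+	 */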
+	for (i = start_idx; i < end_idx; i++) {
+
+		nbytes = ipa3_ctx->ctrl->ipa3_read_ep_reg(dbg_buff,
+				IPA_MAX_MSG_LEN, i);
+
+		*ppos = pos;
+		ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+					      nbytes);
+		if (ret < 0) {
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return ret;
+		}
+
+		size += ret;
+		ubuf += nbytes;
+		count -= nbytes;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	*ppos = pos + size;
+	return size;
+}
+
+static ssize_t ipa3_write_keep_awake(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option == 1)
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	else if (option == 0)
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	else
+		return -EFAULT;
+
+	return count;
+}
+
+static ssize_t ipa3_read_keep_awake(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	ipa3_active_clients_lock();
+	if (ipa3_ctx->ipa3_active_clients.cnt)
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA APPS power state is ON\n");
+	else
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA APPS power state is OFF\n");
+	ipa3_active_clients_unlock();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int i = 0;
+	struct ipa3_hdr_entry *entry;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ipa3_ctx->hdr_tbl_lcl)
+		pr_err("Table resides on local memory\n");
+	else
+		pr_err("Table resides on system (ddr) memory\n");
+
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		nbytes = scnprintf(
+			dbg_buff,
+			IPA_MAX_MSG_LEN,
+			"name:%s len=%d ref=%d partial=%d type=%s ",
+			entry->name,
+			entry->hdr_len,
+			entry->ref_cnt,
+			entry->is_partial,
+			ipa3_hdr_l2_type_name[entry->type]);
+
+		if (entry->is_hdr_proc_ctx) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"phys_base=0x%pa ",
+				&entry->phys_base);
+		} else {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ofst=%u ",
+				entry->offset_entry->offset >> 2);
+		}
+		for (i = 0; i < entry->hdr_len; i++) {
+			scnprintf(dbg_buff + nbytes + i * 2,
+				  IPA_MAX_MSG_LEN - nbytes - i * 2,
+				  "%02x", entry->hdr[i]);
+		}
+		scnprintf(dbg_buff + nbytes + entry->hdr_len * 2,
+			  IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2,
+			  "\n");
+		pr_err("%s", dbg_buff);
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
+		enum ipa_ip_type ip)
+{
+	uint32_t addr[4];
+	uint32_t mask[4];
+	int i;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+		pr_err("tos_value:%d ", attrib->tos_value);
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+		pr_err("tos_mask:%d ", attrib->tos_mask);
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL)
+		pr_err("protocol:%d ", attrib->u.v4.protocol);
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.src_addr);
+			mask[0] = htonl(attrib->u.v4.src_addr_mask);
+			pr_err(
+					"src_addr:%pI4 src_addr_mask:%pI4 ",
+					addr + 0, mask + 0);
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.src_addr[i]);
+				mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+			}
+			pr_err(
+					   "src_addr:%pI6 src_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.dst_addr);
+			mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+			pr_err(
+					   "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+					   addr + 0, mask + 0);
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+				mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+			}
+			pr_err(
+					   "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		pr_err("src_port_range:%u %u ",
+				   attrib->src_port_lo,
+			     attrib->src_port_hi);
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		pr_err("dst_port_range:%u %u ",
+				   attrib->dst_port_lo,
+			     attrib->dst_port_hi);
+	}
+	if (attrib->attrib_mask & IPA_FLT_TYPE)
+		pr_err("type:%d ", attrib->type);
+
+	if (attrib->attrib_mask & IPA_FLT_CODE)
+		pr_err("code:%d ", attrib->code);
+
+	if (attrib->attrib_mask & IPA_FLT_SPI)
+		pr_err("spi:%x ", attrib->spi);
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT)
+		pr_err("src_port:%u ", attrib->src_port);
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT)
+		pr_err("dst_port:%u ", attrib->dst_port);
+
+	if (attrib->attrib_mask & IPA_FLT_TC)
+		pr_err("tc:%d ", attrib->u.v6.tc);
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL)
+		pr_err("flow_label:%x ", attrib->u.v6.flow_label);
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR)
+		pr_err("next_hdr:%d ", attrib->u.v6.next_hdr);
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		pr_err(
+				   "metadata:%x metadata_mask:%x ",
+				   attrib->meta_data, attrib->meta_data_mask);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		pr_err("frg ");
+
+	if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) {
+		pr_err("src_mac_addr:%pM ", attrib->src_mac_addr);
+	}
+
+	if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) {
+		pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
+		pr_err("ether_type:%x ", attrib->ether_type);
+
+	pr_err("\n");
+	return 0;
+}
+
+static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
+{
+	uint8_t addr[16];
+	uint8_t mask[16];
+	int i;
+	int j;
+
+	if (attrib->tos_eq_present)
+		pr_err("tos_value:%d ", attrib->tos_eq);
+
+	if (attrib->protocol_eq_present)
+		pr_err("protocol:%d ", attrib->protocol_eq);
+
+	if (attrib->tc_eq_present)
+		pr_err("tc:%d ", attrib->tc_eq);
+
+	for (i = 0; i < attrib->num_offset_meq_128; i++) {
+		for (j = 0; j < 16; j++) {
+			addr[j] = attrib->offset_meq_128[i].value[j];
+			mask[j] = attrib->offset_meq_128[i].mask[j];
+		}
+		pr_err(
+			"(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ",
+			attrib->offset_meq_128[i].offset,
+			mask, addr);
+	}
+
+	for (i = 0; i < attrib->num_offset_meq_32; i++)
+		pr_err(
+			   "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
+			   attrib->offset_meq_32[i].offset,
+			   attrib->offset_meq_32[i].mask,
+			   attrib->offset_meq_32[i].value);
+
+	for (i = 0; i < attrib->num_ihl_offset_meq_32; i++)
+		pr_err(
+			"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
+			attrib->ihl_offset_meq_32[i].offset,
+			attrib->ihl_offset_meq_32[i].mask,
+			attrib->ihl_offset_meq_32[i].value);
+
+	if (attrib->metadata_meq32_present)
+		pr_err(
+			"(metadata: ofst:%u mask:0x%x val:0x%x) ",
+			attrib->metadata_meq32.offset,
+			attrib->metadata_meq32.mask,
+			attrib->metadata_meq32.value);
+
+	for (i = 0; i < attrib->num_ihl_offset_range_16; i++)
+		pr_err(
+			   "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
+			   attrib->ihl_offset_range_16[i].offset,
+			   attrib->ihl_offset_range_16[i].range_low,
+			   attrib->ihl_offset_range_16[i].range_high);
+
+	if (attrib->ihl_offset_eq_32_present)
+		pr_err(
+			"(ihl_ofst_eq32:%d val:0x%x) ",
+			attrib->ihl_offset_eq_32.offset,
+			attrib->ihl_offset_eq_32.value);
+
+	if (attrib->ihl_offset_eq_16_present)
+		pr_err(
+			"(ihl_ofst_eq16:%d val:0x%x) ",
+			attrib->ihl_offset_eq_16.offset,
+			attrib->ihl_offset_eq_16.value);
+
+	if (attrib->fl_eq_present)
+		pr_err("flow_label:%d ", attrib->fl_eq);
+
+	if (attrib->ipv4_frag_eq_present)
+		pr_err("frag ");
+
+	pr_err("\n");
+	return 0;
+}
+
+static int ipa3_open_dbg(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int i = 0;
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_entry *entry;
+	struct ipa3_rt_tbl_set *set;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	u32 ofst;
+	u32 ofst_words;
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ip ==  IPA_IP_v6) {
+		if (ipa3_ctx->ip6_rt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip6_rt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	} else if (ip == IPA_IP_v4) {
+		if (ipa3_ctx->ip4_rt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip4_rt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	}
+
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+			if (entry->proc_ctx) {
+				ofst = entry->proc_ctx->offset_entry->offset;
+				ofst_words =
+					(ofst +
+					ipa3_ctx->hdr_proc_ctx_tbl.start_offset)
+					>> 5;
+
+				pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt);
+				pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+					i, entry->rule.dst,
+					ipa3_get_ep_mapping(entry->rule.dst),
+					!ipa3_ctx->hdr_proc_ctx_tbl_lcl);
+				pr_err("proc_ctx[32B]:%u attrib_mask:%08x ",
+					ofst_words,
+					entry->rule.attrib.attrib_mask);
+				pr_err("rule_id:%u max_prio:%u prio:%u ",
+					entry->rule_id, entry->rule.max_prio,
+					entry->prio);
+				pr_err("hashable:%u retain_hdr:%u ",
+					entry->rule.hashable,
+					entry->rule.retain_hdr);
+			} else {
+				if (entry->hdr)
+					ofst = entry->hdr->offset_entry->offset;
+				else
+					ofst = 0;
+
+				pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt);
+				pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+					i, entry->rule.dst,
+					ipa3_get_ep_mapping(entry->rule.dst),
+					!ipa3_ctx->hdr_tbl_lcl);
+				pr_err("hdr_ofst[words]:%u attrib_mask:%08x ",
+					ofst >> 2,
+					entry->rule.attrib.attrib_mask);
+				pr_err("rule_id:%u max_prio:%u prio:%u ",
+					entry->rule_id, entry->rule.max_prio,
+					entry->prio);
+				pr_err("hashable:%u retain_hdr:%u ",
+					entry->rule.hashable,
+					entry->rule.retain_hdr);
+			}
+
+			ipa3_attrib_dump(&entry->rule.attrib, ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	int tbls_num;
+	int rules_num;
+	int tbl;
+	int rl;
+	int res = 0;
+	struct ipahal_rt_rule_entry *rules = NULL;
+
+	switch (ip) {
+	case IPA_IP_v4:
+		tbls_num = IPA_MEM_PART(v4_rt_num_index);
+		break;
+	case IPA_IP_v6:
+		tbls_num = IPA_MEM_PART(v6_rt_num_index);
+		break;
+	default:
+		IPAERR("ip type error %d\n", ip);
+		return -EINVAL;
+	}
+
+	IPADBG("Trying to parse %d H/W routing tables - IP=%d\n", tbls_num, ip);
+
+	rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+	if (!rules) {
+		IPAERR("failed to allocate mem for tbl rules\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	mutex_lock(&ipa3_ctx->lock);
+
+	for (tbl = 0 ; tbl < tbls_num ; tbl++) {
+		pr_err("=== Routing Table %d = Hashable Rules ===\n", tbl);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_rt_read_tbl_from_hw(tbl, ip, true, rules,
+			&rules_num);
+		if (res) {
+			pr_err("ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			pr_err("-->No rules. Empty tbl or modem system table\n");
+
+		for (rl = 0 ; rl < rules_num ; rl++) {
+			pr_err("rule_idx:%d dst ep:%d L:%u ",
+				rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl);
+
+			if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+				pr_err("proc_ctx:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+			else
+				pr_err("hdr_ofst:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+
+			pr_err("rule_id:%u prio:%u retain_hdr:%u ",
+				rules[rl].id, rules[rl].priority,
+				rules[rl].retain_hdr);
+			ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+		}
+
+		pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_rt_read_tbl_from_hw(tbl, ip, false, rules,
+			&rules_num);
+		if (res) {
+			pr_err("ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			pr_err("-->No rules. Empty tbl or modem system table\n");
+
+		for (rl = 0 ; rl < rules_num ; rl++) {
+			pr_err("rule_idx:%d dst ep:%d L:%u ",
+				rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl);
+
+			if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+				pr_err("proc_ctx:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+			else
+				pr_err("hdr_ofst:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+
+			pr_err("rule_id:%u prio:%u retain_hdr:%u\n",
+				rules[rl].id, rules[rl].priority,
+				rules[rl].retain_hdr);
+			ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+		}
+		pr_err("\n");
+	}
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	kfree(rules);
+	return res;
+}
+
+static ssize_t ipa3_read_proc_ctx(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa3_hdr_proc_ctx_tbl *tbl;
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	u32 ofst_words;
+
+	tbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl)
+		pr_info("Table resides on local memory\n");
+	else
+		pr_info("Table resides on system(ddr) memory\n");
+
+	list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) {
+		ofst_words = (entry->offset_entry->offset +
+			ipa3_ctx->hdr_proc_ctx_tbl.start_offset)
+			>> 5;
+		if (entry->hdr->is_hdr_proc_ctx) {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+				entry->id,
+				ipa3_hdr_proc_type_name[entry->type],
+				ofst_words);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"hdr_phys_base:0x%pa\n",
+				&entry->hdr->phys_base);
+		} else {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+				entry->id,
+				ipa3_hdr_proc_type_name[entry->type],
+				ofst_words);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"hdr[words]:%u\n",
+				entry->hdr->offset_entry->offset >> 2);
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int i;
+	int j;
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_flt_entry *entry;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	struct ipa3_rt_tbl *rt_tbl;
+	u32 rt_tbl_idx;
+	u32 bitmap;
+	bool eq;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	for (j = 0; j < ipa3_ctx->ipa_num_pipes; j++) {
+		if (!ipa_is_ep_support_flt(j))
+			continue;
+		tbl = &ipa3_ctx->flt_tbl[j][ip];
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (entry->rule.eq_attrib_type) {
+				rt_tbl_idx = entry->rule.rt_tbl_idx;
+				bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
+				eq = true;
+			} else {
+				rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl);
+				if (rt_tbl)
+					rt_tbl_idx = rt_tbl->idx;
+				else
+					rt_tbl_idx = ~0;
+				bitmap = entry->rule.attrib.attrib_mask;
+				eq = false;
+			}
+			pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				j, i, entry->rule.action, rt_tbl_idx);
+			pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ",
+				bitmap, entry->rule.retain_hdr, eq);
+			pr_err("hashable:%u rule_id:%u max_prio:%u prio:%u ",
+				entry->rule.hashable, entry->rule_id,
+				entry->rule.max_prio, entry->prio);
+			if (eq)
+				ipa3_attrib_dump_eq(
+					&entry->rule.eq_attrib);
+			else
+				ipa3_attrib_dump(
+					&entry->rule.attrib, ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	int pipe;
+	int rl;
+	int rules_num;
+	struct ipahal_flt_rule_entry *rules;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	u32 rt_tbl_idx;
+	u32 bitmap;
+	int res = 0;
+
+	IPADBG("Trying to parse %d H/W filtering tables - IP=%d\n",
+		ipa3_ctx->ep_flt_num, ip);
+
+	rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+	if (!rules) {
+		IPAERR("failed to allocate mem for tbl rules\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	mutex_lock(&ipa3_ctx->lock);
+	for (pipe = 0; pipe < ipa3_ctx->ipa_num_pipes; pipe++) {
+		if (!ipa_is_ep_support_flt(pipe))
+			continue;
+		pr_err("=== Filtering Table ep:%d = Hashable Rules ===\n",
+			pipe);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_flt_read_tbl_from_hw(pipe, ip, true, rules,
+			&rules_num);
+		if (res) {
+			pr_err("ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			pr_err("-->No rules. Empty tbl or modem sys table\n");
+
+		for (rl = 0; rl < rules_num; rl++) {
+			rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+			bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
+			pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				pipe, rl, rules[rl].rule.action, rt_tbl_idx);
+			pr_err("attrib_mask:%08x retain_hdr:%d ",
+				bitmap, rules[rl].rule.retain_hdr);
+			pr_err("rule_id:%u prio:%u ",
+				rules[rl].id, rules[rl].priority);
+			ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+		}
+
+		pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n",
+			pipe);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_flt_read_tbl_from_hw(pipe, ip, false, rules,
+			&rules_num);
+		if (res) {
+			pr_err("ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			pr_err("-->No rules. Empty tbl or modem sys table\n");
+		for (rl = 0; rl < rules_num; rl++) {
+			rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+			bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
+			pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				pipe, rl, rules[rl].rule.action, rt_tbl_idx);
+			pr_err("attrib_mask:%08x retain_hdr:%d ",
+				bitmap, rules[rl].rule.retain_hdr);
+			pr_err("rule_id:%u prio:%u ",
+				rules[rl].id, rules[rl].priority);
+			ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+		}
+		pr_err("\n");
+	}
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(rules);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int cnt = 0;
+	uint connect = 0;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++)
+		connect |= (ipa3_ctx->ep[i].valid << i);
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+		"sw_tx=%u\n"
+		"hw_tx=%u\n"
+		"tx_non_linear=%u\n"
+		"tx_compl=%u\n"
+		"wan_rx=%u\n"
+		"stat_compl=%u\n"
+		"lan_aggr_close=%u\n"
+		"wan_aggr_close=%u\n"
+		"act_clnt=%u\n"
+		"con_clnt_bmap=0x%x\n"
+		"wan_rx_empty=%u\n"
+		"wan_repl_rx_empty=%u\n"
+		"lan_rx_empty=%u\n"
+		"lan_repl_rx_empty=%u\n"
+		"flow_enable=%u\n"
+		"flow_disable=%u\n",
+		ipa3_ctx->stats.tx_sw_pkts,
+		ipa3_ctx->stats.tx_hw_pkts,
+		ipa3_ctx->stats.tx_non_linear,
+		ipa3_ctx->stats.tx_pkts_compl,
+		ipa3_ctx->stats.rx_pkts,
+		ipa3_ctx->stats.stat_compl,
+		ipa3_ctx->stats.aggr_close,
+		ipa3_ctx->stats.wan_aggr_close,
+		ipa3_ctx->ipa3_active_clients.cnt,
+		connect,
+		ipa3_ctx->stats.wan_rx_empty,
+		ipa3_ctx->stats.wan_repl_rx_empty,
+		ipa3_ctx->stats.lan_rx_empty,
+		ipa3_ctx->stats.lan_repl_rx_empty,
+		ipa3_ctx->stats.flow_enable,
+		ipa3_ctx->stats.flow_disable);
+	cnt += nbytes;
+
+	for (i = 0; i < IPAHAL_PKT_STATUS_EXCEPTION_MAX; i++) {
+		nbytes = scnprintf(dbg_buff + cnt,
+			IPA_MAX_MSG_LEN - cnt,
+			"lan_rx_excp[%u:%20s]=%u\n", i,
+			ipahal_pkt_status_exception_str(i),
+			ipa3_ctx->stats.rx_excp_pkts[i]);
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+
+#define HEAD_FRMT_STR "%25s\n"
+#define FRMT_STR "%25s %10u\n"
+#define FRMT_STR1 "%25s %10u\n\n"
+
+	int cnt = 0;
+	int nbytes;
+	int ipa_ep_idx;
+	enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD;
+	struct ipa3_ep_context *ep;
+
+	do {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:");
+		cnt += nbytes;
+
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			break;
+		}
+
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+		if (ep->valid != 1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			break;
+		}
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Avail Fifo Desc:",
+			atomic_read(&ep->avail_fifo_desc));
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkts Status Rcvd:",
+			ep->wstats.rx_pkts_status_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Processed:",
+			ep->wstats.rx_hd_processed);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail);
+		cnt += nbytes;
+
+	} while (0);
+
+	client = IPA_CLIENT_WLAN1_CONS;
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+		"Client IPA_CLIENT_WLAN1_CONS Stats:");
+	cnt += nbytes;
+	while (1) {
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			goto nxt_clnt_cons;
+		}
+
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+		if (ep->valid != 1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			goto nxt_clnt_cons;
+		}
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR1, "Tx Pkts Dropped:",
+			ep->wstats.tx_pkts_dropped);
+		cnt += nbytes;
+
+nxt_clnt_cons:
+		switch (client) {
+		case IPA_CLIENT_WLAN1_CONS:
+			client = IPA_CLIENT_WLAN2_CONS;
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+				"Client IPA_CLIENT_WLAN2_CONS Stats:");
+			cnt += nbytes;
+			continue;
+		case IPA_CLIENT_WLAN2_CONS:
+			client = IPA_CLIENT_WLAN3_CONS;
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+				"Client IPA_CLIENT_WLAN3_CONS Stats:");
+			cnt += nbytes;
+			continue;
+		case IPA_CLIENT_WLAN3_CONS:
+			client = IPA_CLIENT_WLAN4_CONS;
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+				"Client IPA_CLIENT_WLAN4_CONS Stats:");
+			cnt += nbytes;
+			continue;
+		case IPA_CLIENT_WLAN4_CONS:
+		default:
+			break;
+		}
+		break;
+	}
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+		"\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:");
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+		"Tx Comm Buff Allocated:",
+		ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+		"Tx Comm Buff Avail:", ipa3_ctx->wc_memb.wlan_comm_free_cnt);
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1,
+		"Total Tx Pkts Freed:", ipa3_ctx->wc_memb.total_tx_pkts_freed);
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	struct Ipa3HwStatsNTNInfoData_t stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (!ipa3_get_ntn_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX num_pkts_processed=%u\n"
+			"TX tail_ptr_val=%u\n"
+			"TX num_db_fired=%u\n"
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n"
+			"TX bamFifoFull=%u\n"
+			"TX bamFifoEmpty=%u\n"
+			"TX bamFifoUsageHigh=%u\n"
+			"TX bamFifoUsageLow=%u\n"
+			"TX bamUtilCount=%u\n"
+			"TX num_db=%u\n"
+			"TX num_unexpected_db=%u\n"
+			"TX num_bam_int_handled=%u\n"
+			"TX num_bam_int_in_non_running_state=%u\n"
+			"TX num_qmb_int_handled=%u\n"
+			"TX num_bam_int_handled_while_wait_for_bam=%u\n"
+			"TX num_bam_int_handled_while_not_in_bam=%u\n",
+			TX_STATS(num_pkts_processed),
+			TX_STATS(tail_ptr_val),
+			TX_STATS(num_db_fired),
+			TX_STATS(tx_comp_ring_stats.ringFull),
+			TX_STATS(tx_comp_ring_stats.ringEmpty),
+			TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+			TX_STATS(tx_comp_ring_stats.ringUsageLow),
+			TX_STATS(tx_comp_ring_stats.RingUtilCount),
+			TX_STATS(bam_stats.bamFifoFull),
+			TX_STATS(bam_stats.bamFifoEmpty),
+			TX_STATS(bam_stats.bamFifoUsageHigh),
+			TX_STATS(bam_stats.bamFifoUsageLow),
+			TX_STATS(bam_stats.bamUtilCount),
+			TX_STATS(num_db),
+			TX_STATS(num_unexpected_db),
+			TX_STATS(num_bam_int_handled),
+			TX_STATS(num_bam_int_in_non_running_state),
+			TX_STATS(num_qmb_int_handled),
+			TX_STATS(num_bam_int_handled_while_wait_for_bam),
+			TX_STATS(num_bam_int_handled_while_not_in_bam));
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX max_outstanding_pkts=%u\n"
+			"RX num_pkts_processed=%u\n"
+			"RX rx_ring_rp_value=%u\n"
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n"
+			"RX bamFifoFull=%u\n"
+			"RX bamFifoEmpty=%u\n"
+			"RX bamFifoUsageHigh=%u\n"
+			"RX bamFifoUsageLow=%u\n"
+			"RX bamUtilCount=%u\n"
+			"RX num_bam_int_handled=%u\n"
+			"RX num_db=%u\n"
+			"RX num_unexpected_db=%u\n"
+			"RX num_pkts_in_dis_uninit_state=%u\n"
+			"num_ic_inj_vdev_change=%u\n"
+			"num_ic_inj_fw_desc_change=%u\n",
+			RX_STATS(max_outstanding_pkts),
+			RX_STATS(num_pkts_processed),
+			RX_STATS(rx_ring_rp_value),
+			RX_STATS(rx_ind_ring_stats.ringFull),
+			RX_STATS(rx_ind_ring_stats.ringEmpty),
+			RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+			RX_STATS(rx_ind_ring_stats.ringUsageLow),
+			RX_STATS(rx_ind_ring_stats.RingUtilCount),
+			RX_STATS(bam_stats.bamFifoFull),
+			RX_STATS(bam_stats.bamFifoEmpty),
+			RX_STATS(bam_stats.bamFifoUsageHigh),
+			RX_STATS(bam_stats.bamFifoUsageLow),
+			RX_STATS(bam_stats.bamUtilCount),
+			RX_STATS(num_bam_int_handled),
+			RX_STATS(num_db),
+			RX_STATS(num_unexpected_db),
+			RX_STATS(num_pkts_in_dis_uninit_state),
+			RX_STATS(num_bam_int_handled_while_not_in_bam),
+			RX_STATS(num_bam_int_handled_while_in_bam_state));
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Fail to read NTN stats\n");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct IpaHwStatsWDIInfoData_t stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (!ipa3_get_wdi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX num_pkts_processed=%u\n"
+			"TX copy_engine_doorbell_value=%u\n"
+			"TX num_db_fired=%u\n"
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n"
+			"TX bamFifoFull=%u\n"
+			"TX bamFifoEmpty=%u\n"
+			"TX bamFifoUsageHigh=%u\n"
+			"TX bamFifoUsageLow=%u\n"
+			"TX bamUtilCount=%u\n"
+			"TX num_db=%u\n"
+			"TX num_unexpected_db=%u\n"
+			"TX num_bam_int_handled=%u\n"
+			"TX num_bam_int_in_non_running_state=%u\n"
+			"TX num_qmb_int_handled=%u\n"
+			"TX num_bam_int_handled_while_wait_for_bam=%u\n",
+			stats.tx_ch_stats.num_pkts_processed,
+			stats.tx_ch_stats.copy_engine_doorbell_value,
+			stats.tx_ch_stats.num_db_fired,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringFull,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
+			stats.tx_ch_stats.tx_comp_ring_stats.RingUtilCount,
+			stats.tx_ch_stats.bam_stats.bamFifoFull,
+			stats.tx_ch_stats.bam_stats.bamFifoEmpty,
+			stats.tx_ch_stats.bam_stats.bamFifoUsageHigh,
+			stats.tx_ch_stats.bam_stats.bamFifoUsageLow,
+			stats.tx_ch_stats.bam_stats.bamUtilCount,
+			stats.tx_ch_stats.num_db,
+			stats.tx_ch_stats.num_unexpected_db,
+			stats.tx_ch_stats.num_bam_int_handled,
+			stats.tx_ch_stats.num_bam_int_in_non_running_state,
+			stats.tx_ch_stats.num_qmb_int_handled,
+			stats.tx_ch_stats.
+				num_bam_int_handled_while_wait_for_bam);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX max_outstanding_pkts=%u\n"
+			"RX num_pkts_processed=%u\n"
+			"RX rx_ring_rp_value=%u\n"
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n"
+			"RX bamFifoFull=%u\n"
+			"RX bamFifoEmpty=%u\n"
+			"RX bamFifoUsageHigh=%u\n"
+			"RX bamFifoUsageLow=%u\n"
+			"RX bamUtilCount=%u\n"
+			"RX num_bam_int_handled=%u\n"
+			"RX num_db=%u\n"
+			"RX num_unexpected_db=%u\n"
+			"RX num_pkts_in_dis_uninit_state=%u\n"
+			"num_ic_inj_vdev_change=%u\n"
+			"num_ic_inj_fw_desc_change=%u\n"
+			"RX reserved1=%u\n"
+			"RX reserved2=%u\n",
+			stats.rx_ch_stats.max_outstanding_pkts,
+			stats.rx_ch_stats.num_pkts_processed,
+			stats.rx_ch_stats.rx_ring_rp_value,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringFull,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
+			stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount,
+			stats.rx_ch_stats.bam_stats.bamFifoFull,
+			stats.rx_ch_stats.bam_stats.bamFifoEmpty,
+			stats.rx_ch_stats.bam_stats.bamFifoUsageHigh,
+			stats.rx_ch_stats.bam_stats.bamFifoUsageLow,
+			stats.rx_ch_stats.bam_stats.bamUtilCount,
+			stats.rx_ch_stats.num_bam_int_handled,
+			stats.rx_ch_stats.num_db,
+			stats.rx_ch_stats.num_unexpected_db,
+			stats.rx_ch_stats.num_pkts_in_dis_uninit_state,
+			stats.rx_ch_stats.num_ic_inj_vdev_change,
+			stats.rx_ch_stats.num_ic_inj_fw_desc_change,
+			stats.rx_ch_stats.reserved1,
+			stats.rx_ch_stats.reserved2);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Fail to read WDI stats\n");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	u32 option = 0;
+	struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtou32(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	memset(&dbg_cnt_ctrl, 0, sizeof(dbg_cnt_ctrl));
+	dbg_cnt_ctrl.type = DBG_CNT_TYPE_GENERAL;
+	dbg_cnt_ctrl.product = true;
+	dbg_cnt_ctrl.src_pipe = 0xff;
+	dbg_cnt_ctrl.rule_idx_pipe_rule = false;
+	dbg_cnt_ctrl.rule_idx = 0;
+	if (option == 1)
+		dbg_cnt_ctrl.en = true;
+	else
+		dbg_cnt_ctrl.en = false;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg_cnt_ctrl);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return count;
+}
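+
+/*
+ * Illustrative usage from a shell (an assumption for illustration only;
+ * paths presume debugfs mounted at the default /sys/kernel/debug and the
+ * "dbg_cnt" node created by ipa3_debugfs_init() later in this file):
+ *
+ *	echo 1 > /sys/kernel/debug/ipa/dbg_cnt    enable debug counter 0
+ *	echo 0 > /sys/kernel/debug/ipa/dbg_cnt    disable debug counter 0
+ *	cat /sys/kernel/debug/ipa/dbg_cnt         dump IPA_DEBUG_CNT_REG_0
+ */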
+
+static ssize_t ipa3_read_dbg_cnt(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	u32 regval;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	regval =
+		ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0);
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_msg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+	int i;
+
+	for (i = 0; i < IPA_EVENT_MAX_NUM; i++) {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				"msg[%u:%27s] W:%u R:%u\n", i,
+				ipa3_event_name[i],
+				ipa3_ctx->stats.msg_w[i],
+				ipa3_ctx->stats.msg_r[i]);
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_nat4(struct file *file,
+		char __user *ubuf, size_t count,
+		loff_t *ppos) {
+
+#define ENTRY_U32_FIELDS 8
+#define NAT_ENTRY_ENABLE 0x8000
+#define NAT_ENTRY_RST_FIN_BIT 0x4000
+#define BASE_TABLE 0
+#define EXPANSION_TABLE 1
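+
+	/*
+	 * Layout of one IPv4 NAT table entry as decoded by the loops below
+	 * (inferred from this parsing code; ENTRY_U32_FIELDS u32 words per
+	 * entry):
+	 *	word 0: private IP address
+	 *	word 1: target IP address
+	 *	word 2: next index (15:0), public port (31:16)
+	 *	word 3: private port (15:0), target port (31:16)
+	 *	word 4: IP checksum delta (15:0), flags/enable (31:16)
+	 *	word 5: timestamp (23:0), protocol (31:24)
+	 *	word 6: previous index (15:0), index table entry (31:16)
+	 *	word 7: TCP/UDP checksum delta (31:16)
+	 */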
+
+	u32 *base_tbl, *indx_tbl;
+	u32 tbl_size, *tmp;
+	u32 value, i, j, rule_id;
+	u16 enable, tbl_entry, flag;
+	u32 no_entries = 0;
+
+	value = ipa3_ctx->nat_mem.public_ip_addr;
+	pr_err("Table IP Address:%d.%d.%d.%d\n",
+		((value & 0xFF000000) >> 24),
+		((value & 0x00FF0000) >> 16),
+		((value & 0x0000FF00) >> 8),
+		((value & 0x000000FF)));
+
+	pr_err("Table Size:%d\n",
+		ipa3_ctx->nat_mem.size_base_tables);
+
+	pr_err("Expansion Table Size:%d\n",
+		ipa3_ctx->nat_mem.size_expansion_tables - 1);
+
+	if (!ipa3_ctx->nat_mem.is_sys_mem)
+		pr_err("Not supported for local(shared) memory\n");
+
+	/* Print Base tables */
+	rule_id = 0;
+	for (j = 0; j < 2; j++) {
+		if (j == BASE_TABLE) {
+			tbl_size = ipa3_ctx->nat_mem.size_base_tables;
+			base_tbl = (u32 *)ipa3_ctx->nat_mem.ipv4_rules_addr;
+
+			pr_err("\nBase Table:\n");
+		} else {
+			tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1;
+			base_tbl =
+			 (u32 *)ipa3_ctx->nat_mem.ipv4_expansion_rules_addr;
+
+			pr_err("\nExpansion Base Table:\n");
+		}
+
+		if (base_tbl != NULL) {
+			for (i = 0; i <= tbl_size; i++, rule_id++) {
+				tmp = base_tbl;
+				value = tmp[4];
+				enable = ((value & 0xFFFF0000) >> 16);
+
+				if (enable & NAT_ENTRY_ENABLE) {
+					no_entries++;
+					pr_err("Rule:%d ", rule_id);
+
+					value = *tmp;
+					pr_err(
+						"Private_IP:%d.%d.%d.%d ",
+						((value & 0xFF000000) >> 24),
+						((value & 0x00FF0000) >> 16),
+						((value & 0x0000FF00) >> 8),
+						((value & 0x000000FF)));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Target_IP:%d.%d.%d.%d ",
+						((value & 0xFF000000) >> 24),
+						((value & 0x00FF0000) >> 16),
+						((value & 0x0000FF00) >> 8),
+						((value & 0x000000FF)));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Next_Index:%d  Public_Port:%d ",
+						(value & 0x0000FFFF),
+						((value & 0xFFFF0000) >> 16));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Private_Port:%d  Target_Port:%d ",
+						(value & 0x0000FFFF),
+						((value & 0xFFFF0000) >> 16));
+					tmp++;
+
+					value = *tmp;
+					flag = ((value & 0xFFFF0000) >> 16);
+					if (flag & NAT_ENTRY_RST_FIN_BIT) {
+						pr_err(
+							"IP_CKSM_delta:0x%x  Flags:%s ",
+							(value & 0x0000FFFF),
+							"Direct_To_A5");
+					} else {
+						pr_err(
+							"IP_CKSM_delta:0x%x  Flags:%s ",
+							(value & 0x0000FFFF),
+							"Fwd_to_route");
+					}
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Time_stamp:0x%x Proto:%d ",
+						(value & 0x00FFFFFF),
+						((value & 0xFF000000) >> 24));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Prev_Index:%d  Indx_tbl_entry:%d ",
+						(value & 0x0000FFFF),
+						((value & 0xFFFF0000) >> 16));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"TCP_UDP_cksum_delta:0x%x\n",
+						((value & 0xFFFF0000) >> 16));
+				}
+
+				base_tbl += ENTRY_U32_FIELDS;
+
+			}
+		}
+	}
+
+	/* Print Index tables */
+	rule_id = 0;
+	for (j = 0; j < 2; j++) {
+		if (j == BASE_TABLE) {
+			tbl_size = ipa3_ctx->nat_mem.size_base_tables;
+			indx_tbl = (u32 *)ipa3_ctx->nat_mem.index_table_addr;
+
+			pr_err("\nIndex Table:\n");
+		} else {
+			tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1;
+			indx_tbl =
+			 (u32 *)ipa3_ctx->nat_mem.index_table_expansion_addr;
+
+			pr_err("\nExpansion Index Table:\n");
+		}
+
+		if (indx_tbl != NULL) {
+			for (i = 0; i <= tbl_size; i++, rule_id++) {
+				tmp = indx_tbl;
+				value = *tmp;
+				tbl_entry = (value & 0x0000FFFF);
+
+				if (tbl_entry) {
+					pr_err("Rule:%d ", rule_id);
+
+					value = *tmp;
+					pr_err(
+						"Table_Entry:%d  Next_Index:%d\n",
+						tbl_entry,
+						((value & 0xFFFF0000) >> 16));
+				}
+
+				indx_tbl++;
+			}
+		}
+	}
+	pr_err("Current No. Nat Entries: %d\n", no_entries);
+
+	return 0;
+}
+
+static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int result, nbytes, cnt = 0;
+
+	result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
+	if (result < 0) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Error in printing RM stat %d\n", result);
+		cnt += nbytes;
+	} else
+		cnt += result;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static void ipa_dump_status(struct ipahal_pkt_status *status)
+{
+	IPA_DUMP_STATUS_FIELD(status_opcode);
+	IPA_DUMP_STATUS_FIELD(exception);
+	IPA_DUMP_STATUS_FIELD(status_mask);
+	IPA_DUMP_STATUS_FIELD(pkt_len);
+	IPA_DUMP_STATUS_FIELD(endp_src_idx);
+	IPA_DUMP_STATUS_FIELD(endp_dest_idx);
+	IPA_DUMP_STATUS_FIELD(metadata);
+	IPA_DUMP_STATUS_FIELD(flt_local);
+	IPA_DUMP_STATUS_FIELD(flt_hash);
+	IPA_DUMP_STATUS_FIELD(flt_global);
+	IPA_DUMP_STATUS_FIELD(flt_ret_hdr);
+	IPA_DUMP_STATUS_FIELD(flt_miss);
+	IPA_DUMP_STATUS_FIELD(flt_rule_id);
+	IPA_DUMP_STATUS_FIELD(rt_local);
+	IPA_DUMP_STATUS_FIELD(rt_hash);
+	IPA_DUMP_STATUS_FIELD(ucp);
+	IPA_DUMP_STATUS_FIELD(rt_tbl_idx);
+	IPA_DUMP_STATUS_FIELD(rt_miss);
+	IPA_DUMP_STATUS_FIELD(rt_rule_id);
+	IPA_DUMP_STATUS_FIELD(nat_hit);
+	IPA_DUMP_STATUS_FIELD(nat_entry_idx);
+	IPA_DUMP_STATUS_FIELD(nat_type);
+	pr_err("tag = 0x%llx\n", (u64)status->tag_info & 0xFFFFFFFFFFFF);
+	IPA_DUMP_STATUS_FIELD(seq_num);
+	IPA_DUMP_STATUS_FIELD(time_of_day_ctr);
+	IPA_DUMP_STATUS_FIELD(hdr_local);
+	IPA_DUMP_STATUS_FIELD(hdr_offset);
+	IPA_DUMP_STATUS_FIELD(frag_hit);
+	IPA_DUMP_STATUS_FIELD(frag_rule);
+}
+
+static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct ipa3_status_stats *stats;
+	int i, j;
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats)
+		return -ENOMEM;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa3_ctx->ep[i].sys || !ipa3_ctx->ep[i].sys->status_stat)
+			continue;
+
+		memcpy(stats, ipa3_ctx->ep[i].sys->status_stat, sizeof(*stats));
+		pr_err("Statuses for pipe %d\n", i);
+		for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) {
+			pr_err("curr=%d\n", stats->curr);
+			ipa_dump_status(&stats->status[stats->curr]);
+			pr_err("\n\n\n");
+			stats->curr = (stats->curr + 1) %
+				IPA_MAX_STATUS_STAT_NUM;
+		}
+	}
+
+	kfree(stats);
+	return 0;
+}
+
+static ssize_t ipa3_print_active_clients_log(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int cnt;
+	int table_size;
+
+	if (active_clients_buf == NULL) {
+		IPAERR("Active Clients buffer is not allocated\n");
+		return 0;
+	}
+	memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE);
+	ipa3_active_clients_lock();
+	cnt = ipa3_active_clients_log_print_buffer(active_clients_buf,
+			IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN);
+	table_size = ipa3_active_clients_log_print_table(active_clients_buf
+			+ cnt, IPA_MAX_MSG_LEN);
+	ipa3_active_clients_unlock();
+
+	return simple_read_from_buffer(ubuf, count, ppos,
+			active_clients_buf, cnt + table_size);
+}
+
+static ssize_t ipa3_clear_active_clients_log(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	ipa3_active_clients_log_clear();
+
+	return count;
+}
+
+static ssize_t ipa3_enable_ipc_low(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option) {
+		if (!ipa3_ctx->logbuf_low) {
+			ipa3_ctx->logbuf_low =
+				ipc_log_context_create(IPA_IPC_LOG_PAGES,
+					"ipa_low", 0);
+		}
+
+		if (ipa3_ctx->logbuf_low == NULL) {
+			IPAERR("failed to get logbuf_low\n");
+			return -EFAULT;
+		}
+	} else {
+		if (ipa3_ctx->logbuf_low)
+			ipc_log_context_destroy(ipa3_ctx->logbuf_low);
+		ipa3_ctx->logbuf_low = NULL;
+	}
+
+	return count;
+}
+
+const struct file_operations ipa3_gen_reg_ops = {
+	.read = ipa3_read_gen_reg,
+};
+
+const struct file_operations ipa3_ep_reg_ops = {
+	.read = ipa3_read_ep_reg,
+	.write = ipa3_write_ep_reg,
+};
+
+const struct file_operations ipa3_keep_awake_ops = {
+	.read = ipa3_read_keep_awake,
+	.write = ipa3_write_keep_awake,
+};
+
+const struct file_operations ipa3_ep_holb_ops = {
+	.write = ipa3_write_ep_holb,
+};
+
+const struct file_operations ipa3_hdr_ops = {
+	.read = ipa3_read_hdr,
+};
+
+const struct file_operations ipa3_rt_ops = {
+	.read = ipa3_read_rt,
+	.open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_rt_hw_ops = {
+	.read = ipa3_read_rt_hw,
+	.open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_proc_ctx_ops = {
+	.read = ipa3_read_proc_ctx,
+};
+
+const struct file_operations ipa3_flt_ops = {
+	.read = ipa3_read_flt,
+	.open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_flt_hw_ops = {
+	.read = ipa3_read_flt_hw,
+	.open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_stats_ops = {
+	.read = ipa3_read_stats,
+};
+
+const struct file_operations ipa3_wstats_ops = {
+	.read = ipa3_read_wstats,
+};
+
+const struct file_operations ipa3_wdi_ops = {
+	.read = ipa3_read_wdi,
+};
+
+const struct file_operations ipa3_ntn_ops = {
+	.read = ipa3_read_ntn,
+};
+
+const struct file_operations ipa3_msg_ops = {
+	.read = ipa3_read_msg,
+};
+
+const struct file_operations ipa3_dbg_cnt_ops = {
+	.read = ipa3_read_dbg_cnt,
+	.write = ipa3_write_dbg_cnt,
+};
+
+const struct file_operations ipa3_status_stats_ops = {
+	.read = ipa_status_stats_read,
+};
+
+const struct file_operations ipa3_nat4_ops = {
+	.read = ipa3_read_nat4,
+};
+
+const struct file_operations ipa3_rm_stats = {
+	.read = ipa3_rm_read_stats,
+};
+
+const struct file_operations ipa3_active_clients = {
+	.read = ipa3_print_active_clients_log,
+	.write = ipa3_clear_active_clients_log,
+};
+
+const struct file_operations ipa3_ipc_low_ops = {
+	.write = ipa3_enable_ipc_low,
+};
+
+void ipa3_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP;
+	const mode_t write_only_mode = S_IWUSR | S_IWGRP;
+	struct dentry *file;
+
+	dent = debugfs_create_dir("ipa", 0);
+	if (IS_ERR(dent)) {
+		IPAERR("fail to create folder in debug_fs.\n");
+		return;
+	}
+
+	file = debugfs_create_u32("hw_type", read_only_mode,
+			dent, &ipa3_ctx->ipa_hw_type);
+	if (!file) {
+		IPAERR("could not create hw_type file\n");
+		goto fail;
+	}
+
+
+	dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+			&ipa3_gen_reg_ops);
+	if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+		IPAERR("fail to create file for debug_fs gen_reg\n");
+		goto fail;
+	}
+
+	dfile_active_clients = debugfs_create_file("active_clients",
+			read_write_mode, dent, 0, &ipa3_active_clients);
+	if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
+		IPAERR("fail to create file for debug_fs active_clients\n");
+		goto fail;
+	}
+
+	active_clients_buf = NULL;
+	active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENT_BUF_SIZE,
+			GFP_KERNEL);
+	if (active_clients_buf == NULL)
+		IPAERR("fail to allocate active clients memory buffer\n");
+
+	dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+			&ipa3_ep_reg_ops);
+	if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+		IPAERR("fail to create file for debug_fs ep_reg\n");
+		goto fail;
+	}
+
+	dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode,
+			dent, 0, &ipa3_keep_awake_ops);
+	if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) {
+		IPAERR("fail to create file for debug_fs dfile_keep_awake\n");
+		goto fail;
+	}
+
+	dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent,
+			0, &ipa3_ep_holb_ops);
+	if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) {
+		IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n");
+		goto fail;
+	}
+
+	dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+			&ipa3_hdr_ops);
+	if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+		IPAERR("fail to create file for debug_fs hdr\n");
+		goto fail;
+	}
+
+	dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent,
+		0, &ipa3_proc_ctx_ops);
+	if (!dfile_proc_ctx || IS_ERR(dfile_proc_ctx)) {
+		IPAERR("fail to create file for debug_fs proc_ctx\n");
+		goto fail;
+	}
+
+	dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa3_rt_ops);
+	if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+		IPAERR("fail to create file for debug_fs ip4 rt\n");
+		goto fail;
+	}
+
+	dfile_ip4_rt_hw = debugfs_create_file("ip4_rt_hw", read_only_mode, dent,
+		(void *)IPA_IP_v4, &ipa3_rt_hw_ops);
+	if (!dfile_ip4_rt_hw || IS_ERR(dfile_ip4_rt_hw)) {
+		IPAERR("fail to create file for debug_fs ip4 rt hw\n");
+		goto fail;
+	}
+
+	dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa3_rt_ops);
+	if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+		IPAERR("fail to create file for debug_fs ip6 rt\n");
+		goto fail;
+	}
+
+	dfile_ip6_rt_hw = debugfs_create_file("ip6_rt_hw", read_only_mode, dent,
+		(void *)IPA_IP_v6, &ipa3_rt_hw_ops);
+	if (!dfile_ip6_rt_hw || IS_ERR(dfile_ip6_rt_hw)) {
+		IPAERR("fail to create file for debug_fs ip6 rt hw\n");
+		goto fail;
+	}
+
+	dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa3_flt_ops);
+	if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+		IPAERR("fail to create file for debug_fs ip4 flt\n");
+		goto fail;
+	}
+
+	dfile_ip4_flt_hw = debugfs_create_file("ip4_flt_hw", read_only_mode,
+			dent, (void *)IPA_IP_v4, &ipa3_flt_hw_ops);
+	if (!dfile_ip4_flt_hw || IS_ERR(dfile_ip4_flt_hw)) {
+		IPAERR("fail to create file for debug_fs ip4 flt hw\n");
+		goto fail;
+	}
+
+	dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa3_flt_ops);
+	if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+		IPAERR("fail to create file for debug_fs ip6 flt\n");
+		goto fail;
+	}
+
+	dfile_ip6_flt_hw = debugfs_create_file("ip6_flt_hw", read_only_mode,
+			dent, (void *)IPA_IP_v6, &ipa3_flt_hw_ops);
+	if (!dfile_ip6_flt_hw || IS_ERR(dfile_ip6_flt_hw)) {
+		IPAERR("fail to create file for debug_fs ip6 flt hw\n");
+		goto fail;
+	}
+
+	dfile_stats = debugfs_create_file("stats", read_only_mode, dent, 0,
+			&ipa3_stats_ops);
+	if (!dfile_stats || IS_ERR(dfile_stats)) {
+		IPAERR("fail to create file for debug_fs stats\n");
+		goto fail;
+	}
+
+	dfile_wstats = debugfs_create_file("wstats", read_only_mode,
+			dent, 0, &ipa3_wstats_ops);
+	if (!dfile_wstats || IS_ERR(dfile_wstats)) {
+		IPAERR("fail to create file for debug_fs wstats\n");
+		goto fail;
+	}
+
+	dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, 0,
+			&ipa3_wdi_ops);
+	if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) {
+		IPAERR("fail to create file for debug_fs wdi stats\n");
+		goto fail;
+	}
+
+	dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+			&ipa3_ntn_ops);
+	if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+		IPAERR("fail to create file for debug_fs ntn stats\n");
+		goto fail;
+	}
+
+	dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
+			&ipa3_dbg_cnt_ops);
+	if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
+		IPAERR("fail to create file for debug_fs dbg_cnt\n");
+		goto fail;
+	}
+
+	dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0,
+			&ipa3_msg_ops);
+	if (!dfile_msg || IS_ERR(dfile_msg)) {
+		IPAERR("fail to create file for debug_fs msg\n");
+		goto fail;
+	}
+
+	dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent,
+			0, &ipa3_nat4_ops);
+	if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) {
+		IPAERR("fail to create file for debug_fs ip4 nat\n");
+		goto fail;
+	}
+
+	dfile_rm_stats = debugfs_create_file("rm_stats",
+			read_only_mode, dent, 0, &ipa3_rm_stats);
+	if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) {
+		IPAERR("fail to create file for debug_fs rm_stats\n");
+		goto fail;
+	}
+
+	dfile_status_stats = debugfs_create_file("status_stats",
+			read_only_mode, dent, 0, &ipa3_status_stats_ops);
+	if (!dfile_status_stats || IS_ERR(dfile_status_stats)) {
+		IPAERR("fail to create file for debug_fs status_stats\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("enable_clock_scaling", read_write_mode,
+		dent, &ipa3_ctx->enable_clock_scaling);
+	if (!file) {
+		IPAERR("could not create enable_clock_scaling file\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps",
+		read_write_mode, dent,
+		&ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal);
+	if (!file) {
+		IPAERR("could not create bw_threshold_nominal_mbps\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps",
+		read_write_mode, dent,
+		&ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo);
+	if (!file) {
+		IPAERR("could not create bw_threshold_turbo_mbps\n");
+		goto fail;
+	}
+
+	file = debugfs_create_file("enable_low_prio_print", write_only_mode,
+		dent, 0, &ipa3_ipc_low_ops);
+	if (!file) {
+		IPAERR("could not create enable_low_prio_print file\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(dent);
+}
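+
+/*
+ * Example usage from a shell (illustrative only; paths assume debugfs is
+ * mounted at the default /sys/kernel/debug location):
+ *
+ *	cat /sys/kernel/debug/ipa/stats              driver S/W statistics
+ *	cat /sys/kernel/debug/ipa/active_clients     clock vote log and table
+ *	echo 1 > /sys/kernel/debug/ipa/enable_low_prio_print
+ *
+ * Note that the routing/filtering/NAT dump nodes (ip4_rt, ip4_flt,
+ * ip4_nat, ...) print through pr_err(), so their output appears in the
+ * kernel log rather than in the reading process.
+ */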
+
+void ipa3_debugfs_remove(void)
+{
+	if (IS_ERR(dent)) {
+		IPAERR("ipa3_debugfs_remove: folder was not created.\n");
+		return;
+	}
+	if (active_clients_buf != NULL) {
+		kfree(active_clients_buf);
+		active_clients_buf = NULL;
+	}
+	debugfs_remove_recursive(dent);
+}
+
+struct dentry *ipa_debugfs_get_root(void)
+{
+	return dent;
+}
+EXPORT_SYMBOL(ipa_debugfs_get_root);
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa3_debugfs_init(void) {}
+void ipa3_debugfs_remove(void) {}
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
new file mode 100644
index 0000000..2a1c286
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
@@ -0,0 +1,990 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include "linux/msm_gsi.h"
+#include "ipa_i.h"
+
+#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010
+#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050
+#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8
+#define IPA_DMA_MAX_PKT_SZ 0xFFFF
+#define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \
+	sizeof(struct sps_iovec) - 1)
+#define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \
+	sizeof(struct sps_iovec) - 1)
+
+#define IPADMA_DRV_NAME "ipa_dma"
+
+#define IPADMA_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPADMA_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPADMA_ERR(fmt, args...) \
+	do { \
+		pr_err(IPADMA_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPADMA_FUNC_ENTRY() \
+	IPADMA_DBG_LOW("ENTRY\n")
+
+#define IPADMA_FUNC_EXIT() \
+	IPADMA_DBG_LOW("EXIT\n")
+
+#ifdef CONFIG_DEBUG_FS
+#define IPADMA_MAX_MSG_LEN 1024
+static char dbg_buff[IPADMA_MAX_MSG_LEN];
+static void ipa3_dma_debugfs_init(void);
+static void ipa3_dma_debugfs_destroy(void);
+#else
+static void ipa3_dma_debugfs_init(void) {}
+static void ipa3_dma_debugfs_destroy(void) {}
+#endif
+
+/**
+ * struct ipa3_dma_ctx - IPADMA driver context information
+ * @is_enabled: is ipa_dma enabled?
+ * @destroy_pending: destroy ipa_dma after handling all pending memcpy
+ * @ipa_dma_xfer_wrapper_cache: cache of ipa3_dma_xfer_wrapper structs
+ * @sync_lock: lock for synchronisation in sync_memcpy
+ * @async_lock: lock for synchronisation in async_memcpy
+ * @enable_lock: lock for is_enabled
+ * @pending_lock: lock to synchronize is_enabled and the pending counters
+ * @done: no pending work - ipadma can be destroyed
+ * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer
+ * @ipa_dma_async_prod_hdl: handle of async memcpy producer
+ * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer
+ * @ipa_dma_async_cons_hdl: handle of async memcpy consumer
+ * @sync_memcpy_pending_cnt: number of pending sync memcpy operations
+ * @async_memcpy_pending_cnt: number of pending async memcpy operations
+ * @uc_memcpy_pending_cnt: number of pending uc memcpy operations
+ * @total_sync_memcpy: total number of sync memcpy (statistics)
+ * @total_async_memcpy: total number of async memcpy (statistics)
+ * @total_uc_memcpy: total number of uc memcpy (statistics)
+ */
+struct ipa3_dma_ctx {
+	bool is_enabled;
+	bool destroy_pending;
+	struct kmem_cache *ipa_dma_xfer_wrapper_cache;
+	struct mutex sync_lock;
+	spinlock_t async_lock;
+	struct mutex enable_lock;
+	spinlock_t pending_lock;
+	struct completion done;
+	u32 ipa_dma_sync_prod_hdl;
+	u32 ipa_dma_async_prod_hdl;
+	u32 ipa_dma_sync_cons_hdl;
+	u32 ipa_dma_async_cons_hdl;
+	atomic_t sync_memcpy_pending_cnt;
+	atomic_t async_memcpy_pending_cnt;
+	atomic_t uc_memcpy_pending_cnt;
+	atomic_t total_sync_memcpy;
+	atomic_t total_async_memcpy;
+	atomic_t total_uc_memcpy;
+};
+static struct ipa3_dma_ctx *ipa3_dma_ctx;
+
+/**
+ * ipa3_dma_init() - Initialize IPADMA.
+ *
+ * This function initializes all IPADMA internal data and connects the
+ * DMA pipes:
+ *	MEMCPY_DMA_SYNC_PROD  -> MEMCPY_DMA_SYNC_CONS
+ *	MEMCPY_DMA_ASYNC_PROD -> MEMCPY_DMA_ASYNC_CONS
+ *
+ * Return codes: 0: success
+ *		-EFAULT: IPADMA is already initialized
+ *		-EINVAL: IPA driver is not initialized
+ *		-ENOMEM: allocating memory error
+ *		-EPERM: pipe connection failed
+ */
+int ipa3_dma_init(void)
+{
+	struct ipa3_dma_ctx *ipa_dma_ctx_t;
+	struct ipa_sys_connect_params sys_in;
+	int res = 0;
+
+	IPADMA_FUNC_ENTRY();
+
+	if (ipa3_dma_ctx) {
+		IPADMA_ERR("Already initialized.\n");
+		return -EFAULT;
+	}
+
+	if (!ipa3_is_ready()) {
+		IPADMA_ERR("IPA is not ready yet\n");
+		return -EINVAL;
+	}
+
+	ipa_dma_ctx_t = kzalloc(sizeof(*(ipa3_dma_ctx)), GFP_KERNEL);
+
+	if (!ipa_dma_ctx_t) {
+		IPADMA_ERR("kzalloc error.\n");
+		return -ENOMEM;
+	}
+
+	ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache =
+		kmem_cache_create("IPA DMA XFER WRAPPER",
+			sizeof(struct ipa3_dma_xfer_wrapper), 0, 0, NULL);
+	if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) {
+		IPAERR(":failed to create ipa dma xfer wrapper cache.\n");
+		res = -ENOMEM;
+		goto fail_mem_ctrl;
+	}
+
+	mutex_init(&ipa_dma_ctx_t->enable_lock);
+	spin_lock_init(&ipa_dma_ctx_t->async_lock);
+	mutex_init(&ipa_dma_ctx_t->sync_lock);
+	spin_lock_init(&ipa_dma_ctx_t->pending_lock);
+	init_completion(&ipa_dma_ctx_t->done);
+	ipa_dma_ctx_t->is_enabled = false;
+	ipa_dma_ctx_t->destroy_pending = false;
+	atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0);
+	atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0);
+	atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0);
+
+	/* IPADMA SYNC PROD-source for sync memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) {
+		IPADMA_ERR(":setup sync prod pipe failed\n");
+		res = -EPERM;
+		goto fail_sync_prod;
+	}
+
+	/* IPADMA SYNC CONS-destination for sync memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = NULL;
+	sys_in.priv = NULL;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) {
+		IPADMA_ERR(":setup sync cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_sync_cons;
+	}
+
+	IPADMA_DBG("SYNC MEMCPY pipes are connected\n");
+
+	/* IPADMA ASYNC PROD-source for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD;
+	sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	sys_in.notify = NULL;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) {
+		IPADMA_ERR(":setup async prod pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_prod;
+	}
+
+	/* IPADMA ASYNC CONS-destination for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = ipa3_dma_async_memcpy_notify_cb;
+	sys_in.priv = NULL;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) {
+		IPADMA_ERR(":setup async cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_cons;
+	}
+	ipa3_dma_debugfs_init();
+	ipa3_dma_ctx = ipa_dma_ctx_t;
+	IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");
+
+	IPADMA_FUNC_EXIT();
+	return res;
+fail_async_cons:
+	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
+fail_async_prod:
+	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
+fail_sync_cons:
+	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
+fail_sync_prod:
+	kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache);
+fail_mem_ctrl:
+	kfree(ipa_dma_ctx_t);
+	ipa3_dma_ctx = NULL;
+	return res;
+
+}
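+
+/*
+ * Illustrative initialization sketch (hypothetical caller, not taken from
+ * an actual client): a client is expected to verify that the IPA driver
+ * is ready before initializing IPADMA, e.g.
+ *
+ *	if (!ipa3_is_ready())
+ *		return -EPROBE_DEFER;
+ *	ret = ipa3_dma_init();
+ *	if (ret)
+ *		return ret;
+ */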
+
+/**
+ * ipa3_dma_enable() - Vote for IPA clocks.
+ *
+ * Return codes: 0: success
+ *		-EPERM: IPADMA is not initialized or is already enabled
+ */
+int ipa3_dma_enable(void)
+{
+	IPADMA_FUNC_ENTRY();
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa3_dma_ctx->enable_lock);
+	if (ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("Already enabled.\n");
+		mutex_unlock(&ipa3_dma_ctx->enable_lock);
+		return -EPERM;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
+	ipa3_dma_ctx->is_enabled = true;
+	mutex_unlock(&ipa3_dma_ctx->enable_lock);
+
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
+
+static bool ipa3_dma_work_pending(void)
+{
+	if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending sync\n");
+		return true;
+	}
+	if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending async\n");
+		return true;
+	}
+	if (atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending uc\n");
+		return true;
+	}
+	IPADMA_DBG_LOW("no pending work\n");
+	return false;
+}
+
+/**
+ * ipa3_dma_disable() - Unvote for IPA clocks.
+ *
+ * Enters power save mode.
+ *
+ * Return codes: 0: success
+ *		-EPERM: IPADMA is not initialized or is already disabled
+ *		-EFAULT: cannot disable ipa_dma as there is pending
+ *			memcpy work
+ */
+int ipa3_dma_disable(void)
+{
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa3_dma_ctx->enable_lock);
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("Already disabled.\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		mutex_unlock(&ipa3_dma_ctx->enable_lock);
+		return -EPERM;
+	}
+	if (ipa3_dma_work_pending()) {
+		IPADMA_ERR("There is pending work, can't disable.\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		mutex_unlock(&ipa3_dma_ctx->enable_lock);
+		return -EFAULT;
+	}
+	ipa3_dma_ctx->is_enabled = false;
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
+	mutex_unlock(&ipa3_dma_ctx->enable_lock);
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
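+
+/*
+ * Enable/disable is a simple on/off clock vote (not reference counted):
+ * a client enables before issuing copies and disables once all of them
+ * have completed. An illustrative sketch (hypothetical caller):
+ *
+ *	ret = ipa3_dma_enable();
+ *	...issue ipa3_dma_sync_memcpy() calls...
+ *	ret = ipa3_dma_disable();
+ *	if (ret == -EFAULT)
+ *		pr_debug("copies still in flight, retry disable later\n");
+ */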
+
+/**
+ * ipa3_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: other
+ */
+int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+	int ep_idx;
+	int res;
+	int i = 0;
+	struct ipa3_sys_context *cons_sys;
+	struct ipa3_sys_context *prod_sys;
+	struct sps_iovec iov;
+	struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
+	struct ipa3_dma_xfer_wrapper *head_descr = NULL;
+	struct gsi_xfer_elem xfer_elem;
+	struct gsi_chan_xfer_notify gsi_notify;
+	unsigned long flags;
+	bool stop_polling = false;
+
+	IPADMA_FUNC_ENTRY();
+	IPADMA_DBG_LOW("dest =  0x%llx, src = 0x%llx, len = %d\n",
+		dest, src, len);
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+	if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
+		if (((u32)src != src) || ((u32)dest != dest)) {
+			IPADMA_ERR("Bad addr, only 32b addr supported for BAM\n");
+			return -EINVAL;
+		}
+	}
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
+		if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt) >=
+				IPA_DMA_MAX_PENDING_SYNC) {
+			atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+			IPADMA_ERR("Reached pending requests limit\n");
+			return -EFAULT;
+		}
+	}
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+		res = -EFAULT;
+		goto fail_mem_alloc;
+	}
+	cons_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+		res = -EFAULT;
+		goto fail_mem_alloc;
+	}
+	prod_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+					GFP_KERNEL);
+	if (!xfer_descr) {
+		IPADMA_ERR("failed to alloc xfer descr wrapper\n");
+		res = -ENOMEM;
+		goto fail_mem_alloc;
+	}
+	xfer_descr->phys_addr_dest = dest;
+	xfer_descr->phys_addr_src = src;
+	xfer_descr->len = len;
+	init_completion(&xfer_descr->xfer_done);
+
+	mutex_lock(&ipa3_dma_ctx->sync_lock);
+	list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+	cons_sys->len++;
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		xfer_elem.addr = dest;
+		xfer_elem.len = len;
+		xfer_elem.type = GSI_XFER_ELEM_DATA;
+		xfer_elem.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem.xfer_user_data = xfer_descr;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+				&xfer_elem, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer dest descr res:%d\n",
+				res);
+			goto fail_send;
+		}
+		xfer_elem.addr = src;
+		xfer_elem.len = len;
+		xfer_elem.type = GSI_XFER_ELEM_DATA;
+		xfer_elem.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem.xfer_user_data = NULL;
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+				&xfer_elem, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer src descr res:%d\n",
+				 res);
+			BUG();
+		}
+	} else {
+		res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len,
+			NULL, 0);
+		if (res) {
+			IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+			goto fail_send;
+		}
+		res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+			NULL, SPS_IOVEC_FLAG_EOT);
+		if (res) {
+			IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+			BUG();
+		}
+	}
+	head_descr = list_first_entry(&cons_sys->head_desc_list,
+				struct ipa3_dma_xfer_wrapper, link);
+
+	/* in case we are not the head of the list, wait for head to wake us */
+	if (xfer_descr != head_descr) {
+		mutex_unlock(&ipa3_dma_ctx->sync_lock);
+		wait_for_completion(&xfer_descr->xfer_done);
+		mutex_lock(&ipa3_dma_ctx->sync_lock);
+		head_descr = list_first_entry(&cons_sys->head_desc_list,
+					struct ipa3_dma_xfer_wrapper, link);
+		BUG_ON(xfer_descr != head_descr);
+	}
+	mutex_unlock(&ipa3_dma_ctx->sync_lock);
+
+	do {
+		/* wait for transfer to complete */
+		if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+			res = gsi_poll_channel(cons_sys->ep->gsi_chan_hdl,
+				&gsi_notify);
+			if (res == GSI_STATUS_SUCCESS)
+				stop_polling = true;
+			else if (res != GSI_STATUS_POLL_EMPTY)
+				IPADMA_ERR(
+					"Failed: gsi_poll_channel, returned %d loop#:%d\n",
+					res, i);
+		} else {
+			res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov);
+			if (res)
+				IPADMA_ERR(
+					"Failed: get_iovec, returned %d loop#:%d\n",
+					res, i);
+			if (iov.addr != 0)
+				stop_polling = true;
+		}
+		usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX,
+			IPA_DMA_POLLING_MAX_SLEEP_RX);
+		i++;
+	} while (!stop_polling);
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		BUG_ON(len != gsi_notify.bytes_xfered);
+		BUG_ON(dest != ((struct ipa3_dma_xfer_wrapper *)
+				(gsi_notify.xfer_user_data))->phys_addr_dest);
+	} else {
+		BUG_ON(dest != iov.addr);
+		BUG_ON(len != iov.size);
+	}
+
+	mutex_lock(&ipa3_dma_ctx->sync_lock);
+	list_del(&head_descr->link);
+	cons_sys->len--;
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+	/* wake the head of the list */
+	if (!list_empty(&cons_sys->head_desc_list)) {
+		head_descr = list_first_entry(&cons_sys->head_desc_list,
+				struct ipa3_dma_xfer_wrapper, link);
+		complete(&head_descr->xfer_done);
+	}
+	mutex_unlock(&ipa3_dma_ctx->sync_lock);
+
+	atomic_inc(&ipa3_dma_ctx->total_sync_memcpy);
+	atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+
+	IPADMA_FUNC_EXIT();
+	return res;
+
+fail_send:
+	list_del(&xfer_descr->link);
+	cons_sys->len--;
+	mutex_unlock(&ipa3_dma_ctx->sync_lock);
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+	atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+	return res;
+}
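+
+/*
+ * Example (hypothetical usage sketch, not part of the driver API): a client
+ * that owns two DMA-able buffers could drive ipa3_dma_sync_memcpy() as shown
+ * below, after IPADMA has been initialized and enabled. The buffers, the
+ * "my_dev" device pointer and the use of dma_alloc_coherent() are
+ * illustration-only assumptions.
+ *
+ *	dma_addr_t src_pa, dst_pa;
+ *	void *src_va = dma_alloc_coherent(my_dev, SZ_4K, &src_pa, GFP_KERNEL);
+ *	void *dst_va = dma_alloc_coherent(my_dev, SZ_4K, &dst_pa, GFP_KERNEL);
+ *	int rc;
+ *
+ *	if (src_va && dst_va) {
+ *		memset(src_va, 0xAB, SZ_4K);
+ *		rc = ipa3_dma_sync_memcpy(dst_pa, src_pa, SZ_4K);
+ *		if (rc)
+ *			pr_err("ipa dma sync memcpy failed %d\n", rc);
+ *	}
+ */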
+
+/**
+ * ipa3_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: descr fifo is full.
+ */
+int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
+		void (*user_cb)(void *user1), void *user_param)
+{
+	int ep_idx;
+	int res = 0;
+	struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
+	struct ipa3_sys_context *prod_sys;
+	struct ipa3_sys_context *cons_sys;
+	struct gsi_xfer_elem xfer_elem_cons, xfer_elem_prod;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	IPADMA_DBG_LOW("dest =  0x%llx, src = 0x%llx, len = %d\n",
+		dest, src, len);
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+	if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
+		if (((u32)src != src) || ((u32)dest != dest)) {
+			IPADMA_ERR(
+				"Bad addr - only 32b addr supported for BAM");
+			return -EINVAL;
+		}
+	}
+	if (!user_cb) {
+		IPADMA_ERR("null pointer: user_cb\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
+		if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt) >=
+				IPA_DMA_MAX_PENDING_ASYNC) {
+			atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+			IPADMA_ERR("Reached pending requests limit\n");
+			return -EFAULT;
+		}
+	}
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+		return -EFAULT;
+	}
+	cons_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+		return -EFAULT;
+	}
+	prod_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+					GFP_KERNEL);
+	if (!xfer_descr) {
+		IPADMA_ERR("failed to alloc xfer descr wrapper\n");
+		res = -ENOMEM;
+		goto fail_mem_alloc;
+	}
+	xfer_descr->phys_addr_dest = dest;
+	xfer_descr->phys_addr_src = src;
+	xfer_descr->len = len;
+	xfer_descr->callback = user_cb;
+	xfer_descr->user1 = user_param;
+
+	spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
+	list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+	cons_sys->len++;
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		xfer_elem_cons.addr = dest;
+		xfer_elem_cons.len = len;
+		xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem_cons.xfer_user_data = xfer_descr;
+		xfer_elem_prod.addr = src;
+		xfer_elem_prod.len = len;
+		xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_prod.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem_prod.xfer_user_data = NULL;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+				&xfer_elem_cons, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer on dest descr res: %d\n",
+				res);
+			goto fail_send;
+		}
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+				&xfer_elem_prod, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer on src descr res: %d\n",
+				res);
+			BUG();
+			goto fail_send;
+		}
+	} else {
+		res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len,
+			xfer_descr, 0);
+		if (res) {
+			IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+			goto fail_send;
+		}
+		res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+			NULL, SPS_IOVEC_FLAG_EOT);
+		if (res) {
+			IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+			BUG();
+			goto fail_send;
+		}
+	}
+	spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+	IPADMA_FUNC_EXIT();
+	return res;
+
+fail_send:
+	list_del(&xfer_descr->link);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+	atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+	return res;
+}
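+
+/*
+ * Example (hypothetical usage sketch): the async variant returns once both
+ * descriptors are queued and reports completion through user_cb. One simple
+ * way to wait for the copy is a completion object; "my_async_cb" and
+ * "copy_done" are illustration-only names, and the buffers are assumed to be
+ * set up as in the sync example above.
+ *
+ *	static void my_async_cb(void *user)
+ *	{
+ *		complete((struct completion *)user);
+ *	}
+ *
+ *	DECLARE_COMPLETION_ONSTACK(copy_done);
+ *	int rc = ipa3_dma_async_memcpy(dst_pa, src_pa, SZ_4K,
+ *				       my_async_cb, &copy_done);
+ *	if (!rc)
+ *		wait_for_completion(&copy_done);
+ */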
+
+/**
+ * ipa3_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-EBADF: IPA uC is not loaded
+ */
+int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int res;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+
+	res = ipa3_uc_memcpy(dest, src, len);
+	if (res) {
+		IPADMA_ERR("ipa3_uc_memcpy failed %d\n", res);
+		goto dec_and_exit;
+	}
+
+	atomic_inc(&ipa3_dma_ctx->total_uc_memcpy);
+	res = 0;
+dec_and_exit:
+	atomic_dec(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+	IPADMA_FUNC_EXIT();
+	return res;
+}
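+
+/*
+ * Example (hypothetical usage sketch): the uC variant takes the same
+ * arguments as the sync variant but offloads the copy to the IPA uC. With
+ * the illustration-only buffers from the earlier sketch:
+ *
+ *	int rc = ipa3_dma_uc_memcpy(dst_pa, src_pa, SZ_4K);
+ *	if (rc)
+ *		pr_err("ipa dma uc memcpy failed %d\n", rc);
+ */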
+
+/**
+ * ipa3_dma_destroy() - teardown IPADMA pipes and release ipadma.
+ *
+ * This is a blocking function; it returns only after IPADMA is destroyed.
+ */
+void ipa3_dma_destroy(void)
+{
+	int res = 0;
+
+	IPADMA_FUNC_ENTRY();
+	if (!ipa3_dma_ctx) {
+		IPADMA_ERR("IPADMA isn't initialized\n");
+		return;
+	}
+
+	if (ipa3_dma_work_pending()) {
+		ipa3_dma_ctx->destroy_pending = true;
+		IPADMA_DBG("There are pending memcpy operations, waiting for completion\n");
+		wait_for_completion(&ipa3_dma_ctx->done);
+	}
+
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n");
+	ipa3_dma_ctx->ipa_dma_async_cons_hdl = 0;
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_cons_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA SYNC CONS failed\n");
+	ipa3_dma_ctx->ipa_dma_sync_cons_hdl = 0;
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_prod_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n");
+	ipa3_dma_ctx->ipa_dma_async_prod_hdl = 0;
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_prod_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA SYNC PROD failed\n");
+	ipa3_dma_ctx->ipa_dma_sync_prod_hdl = 0;
+
+	ipa3_dma_debugfs_destroy();
+	kmem_cache_destroy(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache);
+	kfree(ipa3_dma_ctx);
+	ipa3_dma_ctx = NULL;
+
+	IPADMA_FUNC_EXIT();
+}
+
+/**
+ * ipa3_dma_async_memcpy_notify_cb() - Callback called by the IPA driver, after
+ * notification from the SPS driver or from poll mode, when an Rx operation has
+ * completed (data was written to the dest descriptor on the async_cons ep).
+ *
+ * @priv: not in use.
+ * @evt: event name - IPA_RECEIVE.
+ * @data: the ipa_mem_buffer.
+ */
+void ipa3_dma_async_memcpy_notify_cb(void *priv
+			, enum ipa_dp_evt_type evt, unsigned long data)
+{
+	int ep_idx = 0;
+	struct ipa3_dma_xfer_wrapper *xfer_descr_expected;
+	struct ipa3_sys_context *sys;
+	unsigned long flags;
+	struct ipa_mem_buffer *mem_info;
+
+	IPADMA_FUNC_ENTRY();
+
+	mem_info = (struct ipa_mem_buffer *)data;
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
+	xfer_descr_expected = list_first_entry(&sys->head_desc_list,
+				 struct ipa3_dma_xfer_wrapper, link);
+	list_del(&xfer_descr_expected->link);
+	sys->len--;
+	spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+	if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
+		BUG_ON(xfer_descr_expected->phys_addr_dest !=
+				mem_info->phys_base);
+		BUG_ON(xfer_descr_expected->len != mem_info->size);
+	}
+	atomic_inc(&ipa3_dma_ctx->total_async_memcpy);
+	atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+	xfer_descr_expected->callback(xfer_descr_expected->user1);
+
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+		xfer_descr_expected);
+
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+
+	IPADMA_FUNC_EXIT();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_info;
+
+static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+
+	if (!ipa3_dma_ctx) {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Not initialized\n");
+	} else {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Status:\n	IPADMA is %s\n",
+			(ipa3_dma_ctx->is_enabled) ? "Enabled" : "Disabled");
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Statistics:\n	total sync memcpy: %d\n	",
+			atomic_read(&ipa3_dma_ctx->total_sync_memcpy));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"total async memcpy: %d\n	",
+			atomic_read(&ipa3_dma_ctx->total_async_memcpy));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending sync memcpy jobs: %d\n	",
+			atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending async memcpy jobs: %d\n",
+			atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending uc memcpy jobs: %d\n",
+			atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt));
+	}
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_dma_debugfs_reset_statistics(struct file *file,
+					const char __user *ubuf,
+					size_t count,
+					loff_t *ppos)
+{
+	unsigned long missing;
+	s8 in_num = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &in_num))
+		return -EFAULT;
+	switch (in_num) {
+	case 0:
+		if (ipa3_dma_work_pending())
+			IPADMA_ERR("Note: there are pending memcpy operations\n");
+
+		atomic_set(&ipa3_dma_ctx->total_async_memcpy, 0);
+		atomic_set(&ipa3_dma_ctx->total_sync_memcpy, 0);
+		break;
+	default:
+		IPADMA_ERR("invalid argument: To reset statistics echo 0\n");
+		break;
+	}
+	return count;
+}
+
+const struct file_operations ipa3_ipadma_stats_ops = {
+	.read = ipa3_dma_debugfs_read,
+	.write = ipa3_dma_debugfs_reset_statistics,
+};
+
+static void ipa3_dma_debugfs_init(void)
+{
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP | S_IWOTH;
+
+	dent = debugfs_create_dir("ipa_dma", 0);
+	if (IS_ERR(dent)) {
+		IPADMA_ERR("fail to create folder ipa_dma\n");
+		return;
+	}
+
+	dfile_info =
+		debugfs_create_file("info", read_write_mode, dent,
+				 0, &ipa3_ipadma_stats_ops);
+	if (!dfile_info || IS_ERR(dfile_info)) {
+		IPADMA_ERR("fail to create file info\n");
+		goto fail;
+	}
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa3_dma_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
new file mode 100644
index 0000000..ec3334c
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -0,0 +1,4287 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/msm_gsi.h>
+#include "ipa_i.h"
+#include "ipa_trace.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_LAST_DESC_CNT 0xFFFF
+#define POLLING_INACTIVITY_RX 40
+#define POLLING_MIN_SLEEP_RX 1010
+#define POLLING_MAX_SLEEP_RX 1050
+#define POLLING_INACTIVITY_TX 40
+#define POLLING_MIN_SLEEP_TX 400
+#define POLLING_MAX_SLEEP_TX 500
+/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_MTU 1500
+#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
+#define IPA_GENERIC_AGGR_TIME_LIMIT 1
+#define IPA_GENERIC_AGGR_PKT_LIMIT 0
+
+#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
+#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
+		(X) + NET_SKB_PAD) +\
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
+		(IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
+#define IPA_GENERIC_RX_BUFF_LIMIT (\
+		IPA_REAL_GENERIC_RX_BUFF_SZ(\
+		IPA_GENERIC_RX_BUFF_BASE_SZ) -\
+		IPA_GENERIC_RX_BUFF_BASE_SZ)
+
+/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
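+/*
+ * Worked example: with the default 8KB buffer,
+ * IPA_ADJUST_AGGR_BYTE_LIMIT(IPA_GENERIC_RX_BUFF_BASE_SZ) =
+ * (8192 - 1500) / 1000 = 6, matching IPA_GENERIC_AGGR_BYTE_LIMIT above
+ * (the aggregation byte limit is programmed in units of KB).
+ */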
+
+#define IPA_RX_BUFF_CLIENT_HEADROOM 256
+
+#define IPA_WLAN_RX_POOL_SZ 100
+#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
+#define IPA_WLAN_RX_BUFF_SZ 2048
+#define IPA_WLAN_COMM_RX_POOL_LOW 100
+#define IPA_WLAN_COMM_RX_POOL_HIGH 900
+
+#define IPA_ODU_RX_BUFF_SZ 2048
+#define IPA_ODU_RX_POOL_SZ 64
+#define IPA_SIZE_DL_CSUM_META_TRAILER 8
+
+#define IPA_GSI_EVT_RING_LEN 4096
+#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
+#define IPA_GSI_EVT_RING_INT_MODT 3200 /* 0.1s under 32KHz clock */
+
+#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
+/* The below virtual channel cannot be used by any entity */
+#define IPA_GSI_CH_20_WA_VIRT_CHAN 29
+
+#define IPA_DEFAULT_SYS_YELLOW_WM 32
+
+static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
+static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_replenish_rx_work_func(struct work_struct *work);
+static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_wq_handle_rx(struct work_struct *work);
+static void ipa3_wq_handle_tx(struct work_struct *work);
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size);
+static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
+				u32 size);
+static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
+		struct ipa3_sys_context *sys);
+static void ipa3_cleanup_rx(struct ipa3_sys_context *sys);
+static void ipa3_wq_rx_avail(struct work_struct *work);
+static void ipa3_alloc_wlan_rx_common_cache(u32 size);
+static void ipa3_cleanup_wlan_rx_common_cache(void);
+static void ipa3_wq_repl_rx(struct work_struct *work);
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
+		struct ipa_mem_buffer *mem_info);
+static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
+	struct ipa3_ep_context *ep);
+static int ipa_populate_tag_field(struct ipa3_desc *desc,
+		struct ipa3_tx_pkt_wrapper *tx_pkt,
+		struct ipahal_imm_cmd_pyld **tag_pyld_ret);
+static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
+	bool process_all, bool in_poll_state);
+static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
+	bool process_all, bool in_poll_state);
+static unsigned long tag_to_pointer_wa(uint64_t tag);
+static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
+
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
+
+static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
+				struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+	struct ipa3_tx_pkt_wrapper *next_pkt;
+	int i, cnt;
+
+	if (unlikely(tx_pkt == NULL)) {
+		IPAERR("tx_pkt is NULL\n");
+		return;
+	}
+
+	cnt = tx_pkt->cnt;
+	IPADBG_LOW("cnt: %d\n", cnt);
+	for (i = 0; i < cnt; i++) {
+		spin_lock_bh(&sys->spinlock);
+		if (unlikely(list_empty(&sys->head_desc_list))) {
+			spin_unlock_bh(&sys->spinlock);
+			return;
+		}
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		sys->len--;
+		spin_unlock_bh(&sys->spinlock);
+		if (!tx_pkt->no_unmap_dma) {
+			if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
+				dma_unmap_single(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(ipa3_ctx->pdev,
+					next_pkt->mem.phys_base,
+					next_pkt->mem.size,
+					DMA_TO_DEVICE);
+			}
+		}
+		if (tx_pkt->callback)
+			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
+
+		if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS
+			&& tx_pkt->cnt > 1
+			&& tx_pkt->cnt != IPA_LAST_DESC_CNT) {
+			if (tx_pkt->cnt == IPA_NUM_DESC_PER_SW_TX) {
+				dma_pool_free(ipa3_ctx->dma_pool,
+					tx_pkt->mult.base,
+					tx_pkt->mult.phys_base);
+			} else {
+				dma_unmap_single(ipa3_ctx->pdev,
+					tx_pkt->mult.phys_base,
+					tx_pkt->mult.size,
+					DMA_TO_DEVICE);
+				kfree(tx_pkt->mult.base);
+			}
+		}
+
+		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+}
+
+static void ipa3_wq_write_done_status(int src_pipe,
+			struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+	struct ipa3_sys_context *sys;
+
+	WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes);
+
+	if (!ipa3_ctx->ep[src_pipe].status.status_en)
+		return;
+
+	sys = ipa3_ctx->ep[src_pipe].sys;
+	if (!sys)
+		return;
+
+	ipa3_wq_write_done_common(sys, tx_pkt);
+}
+
+/**
+ * ipa3_wq_write_done() - this function will be (eventually) called when a Tx
+ * operation is complete
+ * @work:	work_struct used by the work queue
+ *
+ * Will be called in deferred context.
+ * - invoke the callback supplied by the client who sent this command
+ * - iterate over all packets and validate that
+ *   the order for sent packet is the same as expected
+ * - delete all the tx packet descriptors from the system
+ *   pipe context (not needed anymore)
+ * - return the tx buffer back to dma_pool
+ */
+static void ipa3_wq_write_done(struct work_struct *work)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+	struct ipa3_sys_context *sys;
+
+	tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);
+	sys = tx_pkt->sys;
+
+	ipa3_wq_write_done_common(sys, tx_pkt);
+}
+
+static int ipa3_handle_tx_core(struct ipa3_sys_context *sys, bool process_all,
+		bool in_poll_state)
+{
+	struct sps_iovec iov;
+	struct ipa3_tx_pkt_wrapper *tx_pkt_expected;
+	int ret;
+	int cnt = 0;
+
+	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+				!atomic_read(&sys->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+		if (ret) {
+			IPAERR("sps_get_iovec failed %d\n", ret);
+			break;
+		}
+
+		if (iov.addr == 0)
+			break;
+
+		tx_pkt_expected = list_first_entry(&sys->head_desc_list,
+						   struct ipa3_tx_pkt_wrapper,
+						   link);
+		ipa3_wq_write_done_common(sys, tx_pkt_expected);
+		cnt++;
+	}
+
+	return cnt;
+}
+
+/**
+ * ipa3_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
+ */
+static void ipa3_tx_switch_to_intr_mode(struct ipa3_sys_context *sys)
+{
+	int ret;
+
+	if (!atomic_read(&sys->curr_polling_state)) {
+		IPAERR("already in intr mode\n");
+		goto fail;
+	}
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		atomic_set(&sys->curr_polling_state, 0);
+		ipa3_dec_release_wakelock();
+		ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+			GSI_CHAN_MODE_CALLBACK);
+		if (ret != GSI_STATUS_SUCCESS) {
+			IPAERR("Failed to switch to intr mode.\n");
+			goto fail;
+		}
+	} else {
+		ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_get_config() failed %d\n", ret);
+			goto fail;
+		}
+		sys->event.options = SPS_O_EOT;
+		ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+		if (ret) {
+			IPAERR("sps_register_event() failed %d\n", ret);
+			goto fail;
+		}
+		sys->ep->connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+		ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_set_config() failed %d\n", ret);
+			goto fail;
+		}
+		atomic_set(&sys->curr_polling_state, 0);
+		ipa3_handle_tx_core(sys, true, false);
+		ipa3_dec_release_wakelock();
+	}
+	return;
+
+fail:
+	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+			msecs_to_jiffies(1));
+}
+
+static void ipa3_handle_tx(struct ipa3_sys_context *sys)
+{
+	int inactive_cycles = 0;
+	int cnt;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	do {
+		cnt = ipa3_handle_tx_core(sys, true, true);
+		if (cnt == 0) {
+			inactive_cycles++;
+			usleep_range(POLLING_MIN_SLEEP_TX,
+					POLLING_MAX_SLEEP_TX);
+		} else {
+			inactive_cycles = 0;
+		}
+	} while (inactive_cycles <= POLLING_INACTIVITY_TX);
+
+	ipa3_tx_switch_to_intr_mode(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static void ipa3_wq_handle_tx(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+
+	sys = container_of(work, struct ipa3_sys_context, work);
+
+	ipa3_handle_tx(sys);
+}
+
+/**
+ * ipa3_send_one() - Send a single descriptor
+ * @sys:	system pipe context
+ * @desc:	descriptor to send
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * - Allocate tx_packet wrapper
+ * - transfer data to the IPA
+ * - after the transfer was done the SPS will
+ *   notify the sending user via ipa_sps_irq_comp_tx()
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+		bool in_atomic)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+	struct gsi_xfer_elem gsi_xfer;
+	int result;
+	u16 sps_flags = SPS_IOVEC_FLAG_EOT;
+	dma_addr_t dma_address;
+	u16 len;
+	u32 mem_flag = GFP_ATOMIC;
+
+	if (unlikely(!in_atomic))
+		mem_flag = GFP_KERNEL;
+
+	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, mem_flag);
+	if (!tx_pkt) {
+		IPAERR("failed to alloc tx wrapper\n");
+		goto fail_mem_alloc;
+	}
+
+	if (!desc->dma_address_valid) {
+		dma_address = dma_map_single(ipa3_ctx->pdev, desc->pyld,
+			desc->len, DMA_TO_DEVICE);
+	} else {
+		dma_address = desc->dma_address;
+		tx_pkt->no_unmap_dma = true;
+	}
+	if (!dma_address) {
+		IPAERR("failed to DMA wrap\n");
+		goto fail_dma_map;
+	}
+
+	INIT_LIST_HEAD(&tx_pkt->link);
+	tx_pkt->type = desc->type;
+	tx_pkt->cnt = 1;    /* only 1 desc in this "set" */
+
+	tx_pkt->mem.phys_base = dma_address;
+	tx_pkt->mem.base = desc->pyld;
+	tx_pkt->mem.size = desc->len;
+	tx_pkt->sys = sys;
+	tx_pkt->callback = desc->callback;
+	tx_pkt->user1 = desc->user1;
+	tx_pkt->user2 = desc->user2;
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		memset(&gsi_xfer, 0, sizeof(gsi_xfer));
+		gsi_xfer.addr = dma_address;
+		gsi_xfer.flags |= GSI_XFER_FLAG_EOT;
+		gsi_xfer.xfer_user_data = tx_pkt;
+		if (desc->type == IPA_IMM_CMD_DESC) {
+			gsi_xfer.len = desc->opcode;
+			gsi_xfer.type = GSI_XFER_ELEM_IMME_CMD;
+		} else {
+			gsi_xfer.len = desc->len;
+			gsi_xfer.type = GSI_XFER_ELEM_DATA;
+		}
+	} else {
+		/*
+		 * Special treatment for immediate commands, where the
+		 * structure of the descriptor is different
+		 */
+		if (desc->type == IPA_IMM_CMD_DESC) {
+			sps_flags |= SPS_IOVEC_FLAG_IMME;
+			len = desc->opcode;
+			IPADBG_LOW("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+					desc->opcode, desc->len, sps_flags);
+			IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+		} else {
+			len = desc->len;
+		}
+	}
+
+	INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
+
+	spin_lock_bh(&sys->spinlock);
+	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+					&gsi_xfer, true);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("GSI xfer failed.\n");
+			goto fail_transport_send;
+		}
+	} else {
+		result = sps_transfer_one(sys->ep->ep_hdl, dma_address,
+					len, tx_pkt, sps_flags);
+		if (result) {
+			IPAERR("sps_transfer_one failed rc=%d\n", result);
+			goto fail_transport_send;
+		}
+	}
+
+	spin_unlock_bh(&sys->spinlock);
+
+	return 0;
+
+fail_transport_send:
+	list_del(&tx_pkt->link);
+	spin_unlock_bh(&sys->spinlock);
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
+fail_dma_map:
+	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+	return -EFAULT;
+}
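+
+/*
+ * Example (hypothetical sketch): a caller that owns a linear payload buffer
+ * could send it as a single data descriptor on a system pipe. Only fields
+ * that ipa3_send_one() consumes are set; "my_buf", "my_len", "my_done_cb"
+ * and the IPA_DATA_DESC_SKB descriptor type are illustration-only
+ * assumptions.
+ *
+ *	struct ipa3_desc desc;
+ *
+ *	memset(&desc, 0, sizeof(desc));
+ *	desc.type = IPA_DATA_DESC_SKB;
+ *	desc.pyld = my_buf;
+ *	desc.len = my_len;
+ *	desc.callback = my_done_cb;
+ *	desc.user1 = my_buf;
+ *	if (ipa3_send_one(sys, &desc, false))
+ *		IPAERR("failed to send data descriptor\n");
+ */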
+
+/**
+ * ipa3_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send (may be immediate command or data)
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * This function is used for system-to-bam connection.
+ * - SPS driver expects struct sps_transfer which will contain all the data
+ *   for a transaction
+ * - ipa3_tx_pkt_wrapper will be used for each ipa
+ *   descriptor (allocated from wrappers cache)
+ * - The wrapper struct will be configured for each ipa-desc payload and will
+ *   contain information which will be later used by the user callbacks
+ * - each transfer will be made by calling sps_transfer()
+ * - Each packet (command or data) that will be sent will also be saved in
+ *   ipa3_sys_context for later check that all data was sent
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send(struct ipa3_sys_context *sys,
+		u32 num_desc,
+		struct ipa3_desc *desc,
+		bool in_atomic)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first;
+	struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
+	struct ipa3_tx_pkt_wrapper *next_pkt;
+	struct sps_transfer transfer = { 0 };
+	struct sps_iovec *iovec;
+	struct gsi_xfer_elem *gsi_xfer_elem_array = NULL;
+	dma_addr_t dma_addr;
+	int i = 0;
+	int j;
+	int result;
+	int fail_dma_wrap = 0;
+	uint size;
+	u32 mem_flag = GFP_ATOMIC;
+	int ipa_ep_idx;
+	struct ipa_gsi_ep_config *gsi_ep_cfg;
+
+	if (unlikely(!in_atomic))
+		mem_flag = GFP_KERNEL;
+
+	size = num_desc * sizeof(struct sps_iovec);
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		ipa_ep_idx = ipa3_get_ep_mapping(sys->ep->client);
+		if (unlikely(ipa_ep_idx < 0)) {
+			IPAERR("invalid ep_index of client = %d\n",
+				sys->ep->client);
+			return -EFAULT;
+		}
+		gsi_ep_cfg = ipa3_get_gsi_ep_info(ipa_ep_idx);
+		if (unlikely(!gsi_ep_cfg)) {
+			IPAERR("failed to get gsi EP config of ep_idx=%d\n",
+				ipa_ep_idx);
+			return -EFAULT;
+		}
+		if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) {
+			IPAERR("Too many chained descriptors need=%d max=%d\n",
+				num_desc, gsi_ep_cfg->ipa_if_tlv);
+			WARN_ON(1);
+			return -EPERM;
+		}
+
+		gsi_xfer_elem_array =
+			kzalloc(num_desc * sizeof(struct gsi_xfer_elem),
+			mem_flag);
+		if (!gsi_xfer_elem_array) {
+			IPAERR("Failed to alloc mem for gsi xfer array.\n");
+			return -EFAULT;
+		}
+	} else {
+		if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+			transfer.iovec = dma_pool_alloc(ipa3_ctx->dma_pool,
+					mem_flag, &dma_addr);
+			if (!transfer.iovec) {
+				IPAERR("fail to alloc dma mem\n");
+				return -EFAULT;
+			}
+		} else {
+			transfer.iovec = kmalloc(size, mem_flag);
+			if (!transfer.iovec) {
+				IPAERR("fail to alloc mem for sps xfr buff ");
+				IPAERR("num_desc = %d size = %d\n",
+						num_desc, size);
+				return -EFAULT;
+			}
+			dma_addr  = dma_map_single(ipa3_ctx->pdev,
+					transfer.iovec, size, DMA_TO_DEVICE);
+			if (!dma_addr) {
+				IPAERR("dma_map_single failed\n");
+				kfree(transfer.iovec);
+				return -EFAULT;
+			}
+		}
+		transfer.iovec_phys = dma_addr;
+		transfer.iovec_count = num_desc;
+	}
+
+	spin_lock_bh(&sys->spinlock);
+
+	for (i = 0; i < num_desc; i++) {
+		fail_dma_wrap = 0;
+		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
+					   mem_flag);
+		if (!tx_pkt) {
+			IPAERR("failed to alloc tx wrapper\n");
+			goto failure;
+		}
+
+		INIT_LIST_HEAD(&tx_pkt->link);
+
+		if (i == 0) {
+			tx_pkt_first = tx_pkt;
+			tx_pkt->cnt = num_desc;
+			INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
+		}
+
+		/* populate tag field */
+		if (desc[i].opcode ==
+			ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_IP_PACKET_TAG_STATUS)) {
+			if (ipa_populate_tag_field(&desc[i], tx_pkt,
+				&tag_pyld_ret)) {
+				IPAERR("Failed to populate tag field\n");
+				goto failure;
+			}
+		}
+
+		tx_pkt->type = desc[i].type;
+
+		if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
+			tx_pkt->mem.base = desc[i].pyld;
+			tx_pkt->mem.size = desc[i].len;
+
+			if (!desc[i].dma_address_valid) {
+				tx_pkt->mem.phys_base =
+					dma_map_single(ipa3_ctx->pdev,
+					tx_pkt->mem.base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+				if (!tx_pkt->mem.phys_base) {
+					IPAERR("failed to do dma map.\n");
+					fail_dma_wrap = 1;
+					goto failure;
+				}
+			} else {
+					tx_pkt->mem.phys_base =
+						desc[i].dma_address;
+					tx_pkt->no_unmap_dma = true;
+			}
+		} else {
+			tx_pkt->mem.base = desc[i].frag;
+			tx_pkt->mem.size = desc[i].len;
+
+			if (!desc[i].dma_address_valid) {
+				tx_pkt->mem.phys_base =
+					skb_frag_dma_map(ipa3_ctx->pdev,
+					desc[i].frag,
+					0, tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+				if (!tx_pkt->mem.phys_base) {
+					IPAERR("dma map failed\n");
+					fail_dma_wrap = 1;
+					goto failure;
+				}
+			} else {
+				tx_pkt->mem.phys_base =
+					desc[i].dma_address;
+				tx_pkt->no_unmap_dma = true;
+			}
+		}
+		tx_pkt->sys = sys;
+		tx_pkt->callback = desc[i].callback;
+		tx_pkt->user1 = desc[i].user1;
+		tx_pkt->user2 = desc[i].user2;
+
+		list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+
+		if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+			gsi_xfer_elem_array[i].addr = tx_pkt->mem.phys_base;
+
+			/*
+			 * Special treatment for immediate commands, where
+			 * the structure of the descriptor is different
+			 */
+			if (desc[i].type == IPA_IMM_CMD_DESC) {
+				gsi_xfer_elem_array[i].len = desc[i].opcode;
+				gsi_xfer_elem_array[i].type =
+					GSI_XFER_ELEM_IMME_CMD;
+			} else {
+				gsi_xfer_elem_array[i].len = desc[i].len;
+				gsi_xfer_elem_array[i].type =
+					GSI_XFER_ELEM_DATA;
+			}
+
+			if (i == (num_desc - 1)) {
+				gsi_xfer_elem_array[i].flags |=
+					GSI_XFER_FLAG_EOT;
+				gsi_xfer_elem_array[i].xfer_user_data =
+					tx_pkt_first;
+				/* "mark" the last desc */
+				tx_pkt->cnt = IPA_LAST_DESC_CNT;
+			} else
+				gsi_xfer_elem_array[i].flags |=
+					GSI_XFER_FLAG_CHAIN;
+		} else {
+			/*
+			 * first desc of set is "special" as it
+			 * holds the count and other info
+			 */
+			if (i == 0) {
+				transfer.user = tx_pkt;
+				tx_pkt->mult.phys_base = dma_addr;
+				tx_pkt->mult.base = transfer.iovec;
+				tx_pkt->mult.size = size;
+			}
+
+			iovec = &transfer.iovec[i];
+			iovec->flags = 0;
+			/*
+			 * Point the iovec to the buffer
+			 */
+			iovec->addr = tx_pkt->mem.phys_base;
+			/*
+			 * Special treatment for immediate commands, where
+			 * the structure of the descriptor is different
+			 */
+			if (desc[i].type == IPA_IMM_CMD_DESC) {
+				iovec->size = desc[i].opcode;
+				iovec->flags |= SPS_IOVEC_FLAG_IMME;
+				IPA_DUMP_BUFF(desc[i].pyld,
+					tx_pkt->mem.phys_base, desc[i].len);
+			} else {
+				iovec->size = desc[i].len;
+			}
+
+			if (i == (num_desc - 1)) {
+				iovec->flags |= SPS_IOVEC_FLAG_EOT;
+				/* "mark" the last desc */
+				tx_pkt->cnt = IPA_LAST_DESC_CNT;
+			}
+		}
+	}
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
+				gsi_xfer_elem_array, true);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("GSI xfer failed.\n");
+			goto failure;
+		}
+		kfree(gsi_xfer_elem_array);
+	} else {
+		result = sps_transfer(sys->ep->ep_hdl, &transfer);
+		if (result) {
+			IPAERR("sps_transfer failed rc=%d\n", result);
+			goto failure;
+		}
+	}
+
+	spin_unlock_bh(&sys->spinlock);
+	return 0;
+
+failure:
+	ipahal_destroy_imm_cmd(tag_pyld_ret);
+	tx_pkt = tx_pkt_first;
+	for (j = 0; j < i; j++) {
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+			dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
+				tx_pkt->mem.size,
+				DMA_TO_DEVICE);
+		} else {
+			dma_unmap_page(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
+				tx_pkt->mem.size,
+				DMA_TO_DEVICE);
+		}
+		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+	if (j < num_desc)
+		/* last desc failed */
+		if (fail_dma_wrap)
+			kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		kfree(gsi_xfer_elem_array);
+	} else {
+		if (transfer.iovec_phys) {
+			if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+				dma_pool_free(ipa3_ctx->dma_pool,
+					transfer.iovec, transfer.iovec_phys);
+			} else {
+				dma_unmap_single(ipa3_ctx->pdev,
+					transfer.iovec_phys, size,
+					DMA_TO_DEVICE);
+				kfree(transfer.iovec);
+			}
+		}
+	}
+	spin_unlock_bh(&sys->spinlock);
+	return -EFAULT;
+}
+
+/**
+ * ipa3_transport_irq_cmd_ack - callback function which will be called by
+ * SPS/GSI driver after an immediate command is complete.
+ * @user1:	pointer to the descriptor of the transfer
+ * @user2: not used
+ *
+ * Complete the immediate commands completion object, this will release the
+ * thread which waits on this completion object (ipa3_send_cmd())
+ */
+static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
+{
+	struct ipa3_desc *desc = (struct ipa3_desc *)user1;
+
+	if (!desc) {
+		IPAERR("desc is NULL\n");
+		WARN_ON(1);
+		return;
+	}
+	IPADBG_LOW("got ack for cmd=%d\n", desc->opcode);
+	complete(&desc->xfer_done);
+}
+
+/**
+ * ipa3_send_cmd - send immediate commands
+ * @num_desc:	number of descriptors within the desc struct
+ * @descr:	descriptor structure
+ *
+ * The function blocks until the command is ACKed by the IPA HW; the caller
+ * must free any resources it allocated after the function returns.
+ * The callback in ipa3_desc should not be set by the caller
+ * for this function.
+ */
+int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
+{
+	struct ipa3_desc *desc;
+	int i, result = 0;
+	struct ipa3_sys_context *sys;
+	int ep_idx;
+
+	for (i = 0; i < num_desc; i++)
+		IPADBG("sending imm cmd %d\n", descr[i].opcode);
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+	sys = ipa3_ctx->ep[ep_idx].sys;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (num_desc == 1) {
+		init_completion(&descr->xfer_done);
+
+		if (descr->callback || descr->user1)
+			WARN_ON(1);
+
+		descr->callback = ipa3_transport_irq_cmd_ack;
+		descr->user1 = descr;
+		if (ipa3_send_one(sys, descr, true)) {
+			IPAERR("fail to send immediate command\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&descr->xfer_done);
+	} else {
+		desc = &descr[num_desc - 1];
+		init_completion(&desc->xfer_done);
+
+		if (desc->callback || desc->user1)
+			WARN_ON(1);
+
+		desc->callback = ipa3_transport_irq_cmd_ack;
+		desc->user1 = desc;
+		if (ipa3_send(sys, num_desc, descr, true)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&desc->xfer_done);
+	}
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
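+
+/*
+ * Example (hypothetical sketch): sending one immediate command that was
+ * already constructed through ipahal. The descriptor follows what
+ * ipa3_send_one()/ipa3_send() expect for IPA_IMM_CMD_DESC; the "cmd_pyld"
+ * object and its "data"/"len" fields are assumed to come from the ipahal
+ * immediate-command construction used elsewhere in this driver.
+ *
+ *	struct ipa3_desc desc;
+ *
+ *	memset(&desc, 0, sizeof(desc));
+ *	desc.opcode = ipahal_imm_cmd_get_opcode(
+ *		IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+ *	desc.pyld = cmd_pyld->data;
+ *	desc.len = cmd_pyld->len;
+ *	desc.type = IPA_IMM_CMD_DESC;
+ *	if (ipa3_send_cmd(1, &desc))
+ *		IPAERR("failed to send immediate command\n");
+ */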
+
+/**
+ * ipa3_sps_irq_tx_notify() - Callback function which will be called by
+ * the SPS driver to start a Tx poll operation.
+ * Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ */
+static void ipa3_sps_irq_tx_notify(struct sps_event_notify *notify)
+{
+	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
+	int ret;
+
+	IPADBG_LOW("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+			atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&sys->curr_polling_state)) {
+			ret = sps_get_config(sys->ep->ep_hdl,
+					&sys->ep->connect);
+			if (ret) {
+				IPAERR("sps_get_config() failed %d\n", ret);
+				break;
+			}
+			sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+			ret = sps_set_config(sys->ep->ep_hdl,
+					&sys->ep->connect);
+			if (ret) {
+				IPAERR("sps_set_config() failed %d\n", ret);
+				break;
+			}
+			ipa3_inc_acquire_wakelock();
+			atomic_set(&sys->curr_polling_state, 1);
+			queue_work(sys->wq, &sys->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa3_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Tx operation is complete.
+ * Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ * This event will later be handled by ipa3_wq_write_done().
+ */
+static void ipa3_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG_LOW("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		tx_pkt = notify->data.transfer.user;
+		if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
+			atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa3_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ *  - Disconnect the packet from the system pipe linked list
+ *  - Unmap the packets skb, make it non DMAable
+ *  - Free the packet from the cache
+ *  - Prepare a proper skb
+ *  - Call the endpoints notify function, passing the skb in the parameters
+ *  - Replenish the rx cache
+ */
+static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
+		bool in_poll_state)
+{
+	int cnt;
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+		cnt = ipa_handle_rx_core_gsi(sys, process_all, in_poll_state);
+	else
+		cnt = ipa_handle_rx_core_sps(sys, process_all, in_poll_state);
+
+	return cnt;
+}
+
+/**
+ * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
+{
+	int ret;
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		if (!atomic_read(&sys->curr_polling_state)) {
+			IPAERR("already in intr mode\n");
+			goto fail;
+		}
+		atomic_set(&sys->curr_polling_state, 0);
+		ipa3_dec_release_wakelock();
+		ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+			GSI_CHAN_MODE_CALLBACK);
+		if (ret != GSI_STATUS_SUCCESS) {
+			IPAERR("Failed to switch to intr mode.\n");
+			goto fail;
+		}
+	} else {
+		ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_get_config() failed %d\n", ret);
+			goto fail;
+		}
+		if (!atomic_read(&sys->curr_polling_state) &&
+			((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
+			IPADBG("already in intr mode\n");
+			return;
+		}
+		if (!atomic_read(&sys->curr_polling_state)) {
+			IPAERR("already in intr mode\n");
+			goto fail;
+		}
+		sys->event.options = SPS_O_EOT;
+		ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+		if (ret) {
+			IPAERR("sps_register_event() failed %d\n", ret);
+			goto fail;
+		}
+		sys->ep->connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+		ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_set_config() failed %d\n", ret);
+			goto fail;
+		}
+		atomic_set(&sys->curr_polling_state, 0);
+		ipa3_handle_rx_core(sys, true, false);
+		ipa3_dec_release_wakelock();
+	}
+	return;
+
+fail:
+	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+			msecs_to_jiffies(1));
+}
+
+/**
+ * ipa3_sps_irq_rx_notify() - Callback function which is called by the SPS
+ * driver when a packet is received
+ * @notify:	SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deferred using a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This prevents the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa3_sps_irq_rx_notify(struct sps_event_notify *notify)
+{
+	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
+	int ret;
+
+	IPADBG_LOW("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+			atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&sys->curr_polling_state)) {
+			sys->ep->eot_in_poll_err++;
+			break;
+		}
+
+		ret = sps_get_config(sys->ep->ep_hdl,
+							 &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_get_config() failed %d\n", ret);
+			break;
+		}
+		sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+			  SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		ret = sps_set_config(sys->ep->ep_hdl,
+							 &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_set_config() failed %d\n", ret);
+			break;
+		}
+		ipa3_inc_acquire_wakelock();
+		atomic_set(&sys->curr_polling_state, 1);
+		trace_intr_to_poll3(sys->ep->client);
+		queue_work(sys->wq, &sys->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa3_switch_to_intr_tx_work_func() - Wrapper function to move from polling
+ *	to interrupt mode
+ * @work: work struct
+ */
+void ipa3_switch_to_intr_tx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa3_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
+	ipa3_handle_tx(sys);
+}
+
+/**
+ * ipa3_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @sys: system pipe context on which packets are received
+ *
+ * ipa3_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+static void ipa3_handle_rx(struct ipa3_sys_context *sys)
+{
+	int inactive_cycles = 0;
+	int cnt;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	do {
+		cnt = ipa3_handle_rx_core(sys, true, true);
+		if (cnt == 0) {
+			inactive_cycles++;
+			trace_idle_sleep_enter3(sys->ep->client);
+			usleep_range(POLLING_MIN_SLEEP_RX,
+					POLLING_MAX_SLEEP_RX);
+			trace_idle_sleep_exit3(sys->ep->client);
+		} else {
+			inactive_cycles = 0;
+		}
+	} while (inactive_cycles <= POLLING_INACTIVITY_RX);
+
+	trace_poll_to_intr3(sys->ep->client);
+	ipa3_rx_switch_to_intr_mode(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa3_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
+
+	if (sys->ep->napi_enabled) {
+		if (sys->ep->switch_to_intr) {
+			ipa3_rx_switch_to_intr_mode(sys);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+			sys->ep->switch_to_intr = false;
+			sys->ep->inactive_cycles = 0;
+		} else
+			sys->ep->client_notify(sys->ep->priv,
+				IPA_CLIENT_START_POLL, 0);
+	} else
+		ipa3_handle_rx(sys);
+}
+
+/**
+ * ipa3_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in:	[in] input needed to setup BAM pipe and configure EP
+ * @clnt_hdl:	[out] client handle
+ *
+ *  - configure the end-point registers with the supplied
+ *    parameters from the user.
+ *  - call SPS APIs to create a system-to-bam connection with IPA.
+ *  - allocate descriptor FIFO
+ *  - register a callback function (ipa3_sps_irq_rx_notify or
+ *    ipa3_sps_irq_tx_notify - depends on client type) in case the driver is
+ *    not configured to polling mode
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+	dma_addr_t dma_addr;
+	char buff[IPA_RESOURCE_NAME_MAX];
+	struct iommu_domain *smmu_domain;
+
+	if (sys_in == NULL || clnt_hdl == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+
+	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+		IPAERR("bad parm client:%d fifo_sz:%d\n",
+			sys_in->client, sys_in->desc_fifo_sz);
+		goto fail_gen;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		goto fail_gen;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+	if (ep->valid == 1) {
+		if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+			IPAERR("EP already allocated.\n");
+			goto fail_and_disable_clocks;
+		} else {
+			if (ipa3_cfg_ep_hdr(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.hdr)) {
+				IPAERR("fail to configure hdr prop of EP.\n");
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			if (ipa3_cfg_ep_cfg(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.cfg)) {
+				IPAERR("fail to configure cfg prop of EP.\n");
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			IPADBG("client %d (ep: %d) overlay ok sys=%p\n",
+					sys_in->client, ipa_ep_idx, ep->sys);
+			ep->client_notify = sys_in->notify;
+			ep->priv = sys_in->priv;
+			*clnt_hdl = ipa_ep_idx;
+			if (!ep->keep_ipa_awake)
+				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+			return 0;
+		}
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+	if (!ep->sys) {
+		ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
+		if (!ep->sys) {
+			IPAERR("failed to alloc sys ctx for client %d\n",
+					sys_in->client);
+			result = -ENOMEM;
+			goto fail_and_disable_clocks;
+		}
+
+		ep->sys->ep = ep;
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
+				sys_in->client);
+		ep->sys->wq = alloc_workqueue(buff,
+				WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+		if (!ep->sys->wq) {
+			IPAERR("failed to create wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq;
+		}
+
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
+				sys_in->client);
+		ep->sys->repl_wq = alloc_workqueue(buff,
+				WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+		if (!ep->sys->repl_wq) {
+			IPAERR("failed to create rep wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq2;
+		}
+
+		INIT_LIST_HEAD(&ep->sys->head_desc_list);
+		INIT_LIST_HEAD(&ep->sys->rcycl_list);
+		spin_lock_init(&ep->sys->spinlock);
+	} else {
+		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
+	}
+
+	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
+	if (ipa3_assign_policy(sys_in, ep->sys)) {
+		IPAERR("failed to assign policy for client %d\n", sys_in->client);
+		result = -ENOMEM;
+		goto fail_gen2;
+	}
+
+	ep->valid = 1;
+	ep->client = sys_in->client;
+	ep->client_notify = sys_in->notify;
+	ep->napi_enabled = sys_in->napi_enabled;
+	ep->priv = sys_in->priv;
+	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
+	atomic_set(&ep->avail_fifo_desc,
+		((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1));
+
+	if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
+	    ep->sys->status_stat == NULL) {
+		ep->sys->status_stat =
+			kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL);
+		if (!ep->sys->status_stat) {
+			IPAERR("no memory\n");
+			goto fail_gen2;
+		}
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+				ipa_ep_idx);
+		goto fail_gen2;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_gen2;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_gen2;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("skipping ep configuration\n");
+	}
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		result = ipa_gsi_setup_channel(sys_in, ep);
+		if (result) {
+			IPAERR("Failed to setup GSI channel\n");
+			goto fail_gen2;
+		}
+	} else {
+		/* Default Config */
+		ep->ep_hdl = sps_alloc_endpoint();
+		if (ep->ep_hdl == NULL) {
+			IPAERR("SPS EP allocation failed.\n");
+			goto fail_gen2;
+		}
+
+		result = sps_get_config(ep->ep_hdl, &ep->connect);
+		if (result) {
+			IPAERR("fail to get config.\n");
+			goto fail_sps_cfg;
+		}
+
+		/* Specific Config */
+		if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+			ep->connect.mode = SPS_MODE_SRC;
+			ep->connect.destination = SPS_DEV_HANDLE_MEM;
+			ep->connect.source = ipa3_ctx->bam_handle;
+			ep->connect.dest_pipe_index = ipa3_ctx->a5_pipe_index++;
+			ep->connect.src_pipe_index = ipa_ep_idx;
+		} else {
+			ep->connect.mode = SPS_MODE_DEST;
+			ep->connect.source = SPS_DEV_HANDLE_MEM;
+			ep->connect.destination = ipa3_ctx->bam_handle;
+			ep->connect.src_pipe_index = ipa3_ctx->a5_pipe_index++;
+			ep->connect.dest_pipe_index = ipa_ep_idx;
+		}
+
+		IPADBG("client:%d ep:%d",
+			sys_in->client, ipa_ep_idx);
+
+		IPADBG("dest_pipe_index:%d src_pipe_index:%d\n",
+			ep->connect.dest_pipe_index,
+			ep->connect.src_pipe_index);
+
+		ep->connect.options = ep->sys->sps_option;
+		ep->connect.desc.size = sys_in->desc_fifo_sz;
+		ep->connect.desc.base = dma_alloc_coherent(ipa3_ctx->pdev,
+				ep->connect.desc.size, &dma_addr, 0);
+		if (ipa3_ctx->smmu_s1_bypass) {
+			ep->connect.desc.phys_base = dma_addr;
+		} else {
+			ep->connect.desc.iova = dma_addr;
+			smmu_domain = ipa3_get_smmu_domain();
+			if (smmu_domain != NULL) {
+				ep->connect.desc.phys_base =
+					iommu_iova_to_phys(smmu_domain,
+							dma_addr);
+			}
+		}
+		if (ep->connect.desc.base == NULL) {
+			IPAERR("fail to get DMA desc memory.\n");
+			goto fail_sps_cfg;
+		}
+
+		ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+		result = ipa3_sps_connect_safe(ep->ep_hdl,
+				&ep->connect, sys_in->client);
+		if (result) {
+			IPAERR("sps_connect fails.\n");
+			goto fail_sps_connect;
+		}
+
+		ep->sys->event.options = SPS_O_EOT;
+		ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
+		ep->sys->event.xfer_done = NULL;
+		ep->sys->event.user = ep->sys;
+		ep->sys->event.callback = ep->sys->sps_callback;
+		result = sps_register_event(ep->ep_hdl, &ep->sys->event);
+		if (result < 0) {
+			IPAERR("register event error %d\n", result);
+			goto fail_register_event;
+		}
+	}	/* end of sps config */
+
+	*clnt_hdl = ipa_ep_idx;
+
+	if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
+		ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
+		ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
+				sizeof(void *), GFP_KERNEL);
+		if (!ep->sys->repl.cache) {
+			IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
+			ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
+			ep->sys->repl.capacity = 0;
+		} else {
+			atomic_set(&ep->sys->repl.head_idx, 0);
+			atomic_set(&ep->sys->repl.tail_idx, 0);
+			ipa3_wq_repl_rx(&ep->sys->repl_work);
+		}
+	}
+
+	if (IPA_CLIENT_IS_CONS(sys_in->client))
+		ipa3_replenish_rx_cache(ep->sys);
+
+	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
+		ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
+		atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
+	}
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
+		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
+			sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+			IPADBG("modem cfg emb pipe flt\n");
+		else
+			ipa3_install_dflt_flt_rules(ipa_ep_idx);
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+			ipa_ep_idx, ep->sys);
+
+	return 0;
+
+fail_register_event:
+	sps_disconnect(ep->ep_hdl);
+fail_sps_connect:
+	dma_free_coherent(ipa3_ctx->pdev, ep->connect.desc.size,
+			  ep->connect.desc.base,
+			  ep->connect.desc.phys_base);
+fail_sps_cfg:
+	sps_free_endpoint(ep->ep_hdl);
+fail_gen2:
+	destroy_workqueue(ep->sys->repl_wq);
+fail_wq2:
+	destroy_workqueue(ep->sys->wq);
+fail_wq:
+	kfree(ep->sys);
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+fail_and_disable_clocks:
+	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+	return result;
+}
+
+/**
+ * ipa3_teardown_sys_pipe() - Teardown the system pipe and cleanup the IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_teardown_sys_pipe(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int empty;
+	int result;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_disable_data_path(clnt_hdl);
+	if (ep->napi_enabled) {
+		ep->switch_to_intr = true;
+		do {
+			usleep_range(95, 105);
+		} while (atomic_read(&ep->sys->curr_polling_state));
+	}
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		do {
+			spin_lock_bh(&ep->sys->spinlock);
+			empty = list_empty(&ep->sys->head_desc_list);
+			spin_unlock_bh(&ep->sys->spinlock);
+			if (!empty)
+				usleep_range(95, 105);
+			else
+				break;
+		} while (1);
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+	flush_workqueue(ep->sys->wq);
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		result = ipa3_stop_gsi_channel(clnt_hdl);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("GSI stop chan err: %d.\n", result);
+			BUG();
+			return result;
+		}
+		result = gsi_reset_channel(ep->gsi_chan_hdl);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("Failed to reset chan: %d.\n", result);
+			BUG();
+			return result;
+		}
+		dma_free_coherent(ipa3_ctx->pdev,
+			ep->gsi_mem_info.chan_ring_len,
+			ep->gsi_mem_info.chan_ring_base_vaddr,
+			ep->gsi_mem_info.chan_ring_base_addr);
+		result = gsi_dealloc_channel(ep->gsi_chan_hdl);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("Failed to dealloc chan: %d.\n", result);
+			BUG();
+			return result;
+		}
+
+		/* free event ring only when it is present */
+		if (ep->gsi_evt_ring_hdl != ~0) {
+			result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+			if (result != GSI_STATUS_SUCCESS) {
+				IPAERR("Failed to reset evt ring: %d.\n",
+						result);
+				BUG();
+				return result;
+			}
+			dma_free_coherent(ipa3_ctx->pdev,
+				ep->gsi_mem_info.evt_ring_len,
+				ep->gsi_mem_info.evt_ring_base_vaddr,
+				ep->gsi_mem_info.evt_ring_base_addr);
+			result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+			if (result != GSI_STATUS_SUCCESS) {
+				IPAERR("Failed to dealloc evt ring: %d.\n",
+						result);
+				BUG();
+				return result;
+			}
+		}
+	} else {
+		sps_disconnect(ep->ep_hdl);
+		dma_free_coherent(ipa3_ctx->pdev, ep->connect.desc.size,
+				  ep->connect.desc.base,
+				  ep->connect.desc.phys_base);
+		sps_free_endpoint(ep->ep_hdl);
+	}
+	if (ep->sys->repl_wq)
+		flush_workqueue(ep->sys->repl_wq);
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		ipa3_cleanup_rx(ep->sys);
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
+		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
+			ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+			IPADBG("modem cfg emb pipe flt\n");
+		else
+			ipa3_delete_dflt_flt_rules(clnt_hdl);
+	}
+
+	if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
+		atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt);
+
+	memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats));
+
+	if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt))
+		ipa3_cleanup_wlan_rx_common_cache();
+
+	ep->valid = 0;
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
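+
+/*
+ * Illustrative usage sketch (not part of the driver): a client normally
+ * pairs ipa3_setup_sys_pipe() with ipa3_teardown_sys_pipe(). Only fields
+ * referenced in this file are shown; the FIFO size is a placeholder.
+ *
+ *	struct ipa_sys_connect_params sys_in;
+ *	u32 hdl;
+ *
+ *	memset(&sys_in, 0, sizeof(sys_in));
+ *	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+ *	sys_in.desc_fifo_sz = 0x800;
+ *	if (ipa3_setup_sys_pipe(&sys_in, &hdl) == 0) {
+ *		... use the pipe ...
+ *		ipa3_teardown_sys_pipe(hdl);
+ *	}
+ */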
+
+/**
+ * ipa3_tx_comp_usr_notify_release() - Callback which calls the user supplied
+ * callback function to release the skb, or releases the skb on its own if no
+ * callback function was supplied
+ * @user1: [in] the skb to release
+ * @user2: [in] endpoint index used to look up the client notify callback
+ *
+ * This notify callback is for the destination client.
+ * This function is supplied in ipa3_connect.
+ */
+static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
+{
+	struct sk_buff *skb = (struct sk_buff *)user1;
+	int ep_idx = user2;
+
+	IPADBG_LOW("skb=%p ep=%d\n", skb, ep_idx);
+
+	IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl);
+
+	if (ipa3_ctx->ep[ep_idx].client_notify)
+		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)skb);
+	else
+		dev_kfree_skb_any(skb);
+}
+
+static void ipa3_tx_cmd_comp(void *user1, int user2)
+{
+	ipahal_destroy_imm_cmd(user1);
+}
+
+/**
+ * ipa3_tx_dp() - Data-path tx handler
+ * @dst:	[in] which IPA destination to route tx packets to
+ * @skb:	[in] the packet to send
+ * @meta:	[in] TX packet metadata
+ *
+ * Data-path tx handler. It is used both for the SW data-path, which bypasses
+ * most IPA HW blocks, and for the regular HW data-path (WLAN AMPDU traffic
+ * only). If dst is a "valid" CONS type, the SW data-path is used. If dst is
+ * the WLAN_AMPDU PROD type, the HW data-path for WLAN AMPDU is used. Anything
+ * else is an error. On error, the client needs to free the skb as needed. On
+ * success, the IPA driver will later invoke the client callback if one was
+ * supplied; that callback should free the skb. If no callback was supplied,
+ * the IPA driver will free the skb internally.
+ *
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send is done from the SPS point-of-view, the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp will call the user supplied
+ * callback (from ipa3_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *meta)
+{
+	struct ipa3_desc *desc;
+	struct ipa3_desc _desc[3];
+	int dst_ep_idx;
+	struct ipahal_imm_cmd_ip_packet_init cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipa3_sys_context *sys;
+	int src_ep_idx;
+	int num_frags, f;
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA3 driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (skb->len == 0) {
+		IPAERR("packet size is 0\n");
+		return -EINVAL;
+	}
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	if (num_frags) {
+		/* 1 desc for tag to resolve status out-of-order issue;
+		 * 1 desc is needed for the linear portion of skb;
+		 * 1 desc may be needed for the PACKET_INIT;
+		 * 1 desc for each frag
+		 */
+		desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
+		if (!desc) {
+			IPAERR("failed to alloc desc array\n");
+			goto fail_mem;
+		}
+	} else {
+		memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
+		desc = &_desc[0];
+	}
+
+	/*
+	 * USB_CONS: PKT_INIT ep_idx = dst pipe
+	 * Q6_CONS: PKT_INIT ep_idx = sender pipe
+	 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
+	 *
+	 * LAN TX: all PKT_INIT
+	 * WAN TX: PKT_INIT (cmd) + HW (data)
+	 *
+	 */
+	if (IPA_CLIENT_IS_CONS(dst)) {
+		src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+		if (-1 == src_ep_idx) {
+			IPAERR("Client %u is not mapped\n",
+				IPA_CLIENT_APPS_LAN_WAN_PROD);
+			goto fail_gen;
+		}
+		dst_ep_idx = ipa3_get_ep_mapping(dst);
+	} else {
+		src_ep_idx = ipa3_get_ep_mapping(dst);
+		if (-1 == src_ep_idx) {
+			IPAERR("Client %u is not mapped\n", dst);
+			goto fail_gen;
+		}
+		if (meta && meta->pkt_init_dst_ep_valid)
+			dst_ep_idx = meta->pkt_init_dst_ep;
+		else
+			dst_ep_idx = -1;
+	}
+
+	sys = ipa3_ctx->ep[src_ep_idx].sys;
+
+	if (!sys->ep->valid) {
+		IPAERR("pipe not valid\n");
+		goto fail_gen;
+	}
+
+	if (dst_ep_idx != -1) {
+		/* SW data path */
+		cmd.destination_pipe_index = dst_ep_idx;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_IP_PACKET_INIT, &cmd, true);
+		if (unlikely(!cmd_pyld)) {
+			IPAERR("failed to construct ip_packet_init imm cmd\n");
+			goto fail_gen;
+		}
+
+		/* the tag field will be populated in ipa3_send() function */
+		desc[0].opcode = ipahal_imm_cmd_get_opcode(
+			IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		desc[0].callback = ipa3_tag_destroy_imm;
+		desc[1].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+		desc[1].pyld = cmd_pyld->data;
+		desc[1].len = cmd_pyld->len;
+		desc[1].type = IPA_IMM_CMD_DESC;
+		desc[1].callback = ipa3_tx_cmd_comp;
+		desc[1].user1 = cmd_pyld;
+		desc[2].pyld = skb->data;
+		desc[2].len = skb_headlen(skb);
+		desc[2].type = IPA_DATA_DESC_SKB;
+		desc[2].callback = ipa3_tx_comp_usr_notify_release;
+		desc[2].user1 = skb;
+		desc[2].user2 = (meta && meta->pkt_init_dst_ep_valid &&
+				meta->pkt_init_dst_ep_remote) ?
+				src_ep_idx :
+				dst_ep_idx;
+		if (meta && meta->dma_address_valid) {
+			desc[2].dma_address_valid = true;
+			desc[2].dma_address = meta->dma_address;
+		}
+
+		for (f = 0; f < num_frags; f++) {
+			desc[3+f].frag = &skb_shinfo(skb)->frags[f];
+			desc[3+f].type = IPA_DATA_DESC_SKB_PAGED;
+			desc[3+f].len = skb_frag_size(desc[3+f].frag);
+		}
+		/* don't free skb till frag mappings are released */
+		if (num_frags) {
+			desc[3+f-1].callback = desc[2].callback;
+			desc[3+f-1].user1 = desc[2].user1;
+			desc[3+f-1].user2 = desc[2].user2;
+			desc[2].callback = NULL;
+		}
+
+		if (ipa3_send(sys, num_frags + 3, desc, true)) {
+			IPAERR("fail to send skb %p num_frags %u SWP\n",
+				skb, num_frags);
+			goto fail_send;
+		}
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
+	} else {
+		/* HW data path */
+		desc[0].opcode =
+			ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		desc[0].callback = ipa3_tag_destroy_imm;
+		desc[1].pyld = skb->data;
+		desc[1].len = skb_headlen(skb);
+		desc[1].type = IPA_DATA_DESC_SKB;
+		desc[1].callback = ipa3_tx_comp_usr_notify_release;
+		desc[1].user1 = skb;
+		desc[1].user2 = src_ep_idx;
+
+		if (meta && meta->dma_address_valid) {
+			desc[1].dma_address_valid = true;
+			desc[1].dma_address = meta->dma_address;
+		}
+		if (num_frags == 0) {
+			if (ipa3_send(sys, 2, desc, true)) {
+				IPAERR("fail to send skb %p HWP\n", skb);
+				goto fail_gen;
+			}
+		} else {
+			for (f = 0; f < num_frags; f++) {
+				desc[2+f].frag = &skb_shinfo(skb)->frags[f];
+				desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
+				desc[2+f].len = skb_frag_size(desc[2+f].frag);
+			}
+			/* don't free skb till frag mappings are released */
+			desc[2+f-1].callback = desc[1].callback;
+			desc[2+f-1].user1 = desc[1].user1;
+			desc[2+f-1].user2 = desc[1].user2;
+			desc[1].callback = NULL;
+
+			if (ipa3_send(sys, num_frags + 2, desc, true)) {
+				IPAERR("fail to send skb %p num_frags %u HWP\n",
+					skb, num_frags);
+				goto fail_gen;
+			}
+		}
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
+	}
+
+	if (num_frags) {
+		kfree(desc);
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
+	}
+	return 0;
+
+fail_send:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+fail_gen:
+	if (num_frags)
+		kfree(desc);
+fail_mem:
+	return -EFAULT;
+}
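+
+/*
+ * Illustrative usage sketch (not part of the driver): queueing a linear skb
+ * on the HW data path from the APPS_LAN_WAN producer. On success the skb is
+ * released later via the completion callback (IPA_WRITE_DONE) or internally
+ * by the driver; on failure the caller still owns the skb.
+ *
+ *	if (ipa3_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL)) {
+ *		dev_kfree_skb_any(skb);
+ *		return -EFAULT;
+ *	}
+ */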
+
+static void ipa3_wq_handle_rx(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+
+	sys = container_of(work, struct ipa3_sys_context, work);
+
+	if (sys->ep->napi_enabled) {
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+		sys->ep->client_notify(sys->ep->priv,
+				IPA_CLIENT_START_POLL, 0);
+	} else
+		ipa3_handle_rx(sys);
+}
+
+static void ipa3_wq_repl_rx(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	gfp_t flag = GFP_KERNEL;
+	u32 next;
+	u32 curr;
+
+	sys = container_of(work, struct ipa3_sys_context, repl_work);
+	curr = atomic_read(&sys->repl.tail_idx);
+
+begin:
+	while (1) {
+		next = (curr + 1) % sys->repl.capacity;
+		if (next == atomic_read(&sys->repl.head_idx))
+			goto fail_kmem_cache_alloc;
+
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
+					__func__, sys);
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL) {
+			pr_err_ratelimited("%s fail alloc skb sys=%p\n",
+					__func__, sys);
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+			pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
+			       __func__, (void *)rx_pkt->data.dma_addr,
+			       ptr, sys);
+			goto fail_dma_mapping;
+		}
+
+		sys->repl.cache[curr] = rx_pkt;
+		curr = next;
+		/* ensure write is done before setting tail index */
+		mb();
+		atomic_set(&sys->repl.tail_idx, next);
+	}
+
+	return;
+
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	if (atomic_read(&sys->repl.tail_idx) ==
+			atomic_read(&sys->repl.head_idx)) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
+		else
+			WARN_ON(1);
+		pr_err_ratelimited("%s sys=%p repl ring empty\n",
+				__func__, sys);
+		goto begin;
+	}
+}
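+
+/*
+ * Note on the repl ring above (numbers are illustrative): repl.cache is a
+ * single-producer/single-consumer ring of rx_pool_sz + 1 slots. The producer
+ * (ipa3_wq_repl_rx) stops once the slot after tail would reach head, so one
+ * slot is always left unused and "full" can be told apart from "empty":
+ *
+ *	capacity = 5 (rx_pool_sz = 4)
+ *	head == tail			ring is empty
+ *	(tail + 1) % capacity == head	ring is full, 4 buffers ready
+ *
+ * The consumer (ipa3_fast_replenish_rx_cache) only advances head, so no lock
+ * is needed; the mb() barriers order the slot writes against the index
+ * updates.
+ */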
+
+static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
+	struct ipa3_rx_pkt_wrapper *tmp;
+	int ret;
+	struct gsi_xfer_elem gsi_xfer_elem_one;
+	u32 rx_len_cached = 0;
+
+	IPADBG_LOW("\n");
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+	rx_len_cached = sys->len;
+
+	if (rx_len_cached < sys->rx_pool_sz) {
+		list_for_each_entry_safe(rx_pkt, tmp,
+			&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
+			list_del(&rx_pkt->link);
+
+			if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0)
+				ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
+
+			INIT_LIST_HEAD(&rx_pkt->link);
+			rx_pkt->len = 0;
+			rx_pkt->sys = sys;
+
+			list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+			if (ipa3_ctx->transport_prototype ==
+					IPA_TRANSPORT_TYPE_GSI) {
+				memset(&gsi_xfer_elem_one, 0,
+					sizeof(gsi_xfer_elem_one));
+				gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+				gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
+				gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+				gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+				gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+				gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+				ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+					&gsi_xfer_elem_one, true);
+			} else {
+				ret = sps_transfer_one(sys->ep->ep_hdl,
+					rx_pkt->data.dma_addr,
+					IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);
+			}
+
+			if (ret) {
+				IPAERR("failed to provide buffer: %d\n", ret);
+				goto fail_provide_rx_buffer;
+			}
+
+			rx_len_cached = ++sys->len;
+
+			if (rx_len_cached >= sys->rx_pool_sz) {
+				spin_unlock_bh(
+					&ipa3_ctx->wc_memb.wlan_spinlock);
+				return;
+			}
+		}
+	}
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+	if (rx_len_cached < sys->rx_pool_sz &&
+			ipa3_ctx->wc_memb.wlan_comm_total_cnt <
+			 IPA_WLAN_COMM_RX_POOL_HIGH) {
+		ipa3_replenish_rx_cache(sys);
+		ipa3_ctx->wc_memb.wlan_comm_total_cnt +=
+			(sys->rx_pool_sz - rx_len_cached);
+	}
+
+	return;
+
+fail_provide_rx_buffer:
+	list_del(&rx_pkt->link);
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+}
+
+static void ipa3_cleanup_wlan_rx_common_cache(void)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct ipa3_rx_pkt_wrapper *tmp;
+
+	list_for_each_entry_safe(rx_pkt, tmp,
+		&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+				IPA_WLAN_COMM_RX_POOL_LOW, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rx_pkt->data.skb);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+		ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
+		ipa3_ctx->wc_memb.wlan_comm_total_cnt--;
+	}
+	ipa3_ctx->wc_memb.total_tx_pkts_freed = 0;
+
+	if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0)
+		IPAERR("wlan comm buff free cnt: %d\n",
+			ipa3_ctx->wc_memb.wlan_comm_free_cnt);
+
+	if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0)
+		IPAERR("wlan comm buff total cnt: %d\n",
+			ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+
+}
+
+static void ipa3_alloc_wlan_rx_common_cache(u32 size)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int rx_len_cached = 0;
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt;
+	while (rx_len_cached < size) {
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc rx wrapper\n");
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+
+		rx_pkt->data.skb =
+			ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
+						flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+				IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+			IPAERR("dma_map_single failure %p for %p\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		list_add_tail(&rx_pkt->link,
+			&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+		rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
+
+		ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+
+	}
+
+	return;
+
+fail_dma_mapping:
+	dev_kfree_skb_any(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	return;
+}
+
+
+/**
+ * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.
+ * @sys:	system pipe context to replenish
+ *
+ * The function allocates buffers from the rx_pkt_wrapper_cache cache until
+ * there are rx_pool_sz buffers queued on the pipe. For each buffer it will:
+ *   - Allocate a wrapper from the cache
+ *   - Initialize the packet's link
+ *   - Initialize the packet's work struct
+ *   - Allocate the packet's socket buffer (skb)
+ *   - Fill the packet's skb with data
+ *   - Make the packet DMA-able
+ *   - Add the packet to the system pipe linked list
+ *   - Initiate a GSI or SPS transfer so the HW can use this packet later.
+ */
+static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_one;
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = sys->len;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc rx wrapper\n");
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+			IPAERR("dma_map_single failure %p for %p\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+
+		if (ipa3_ctx->transport_prototype ==
+				IPA_TRANSPORT_TYPE_GSI) {
+			memset(&gsi_xfer_elem_one, 0,
+				sizeof(gsi_xfer_elem_one));
+			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+			gsi_xfer_elem_one.len = sys->rx_buff_sz;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+			gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
+					1, &gsi_xfer_elem_one, true);
+			if (ret != GSI_STATUS_SUCCESS) {
+				IPAERR("failed to provide buffer: %d\n",
+					ret);
+				goto fail_provide_rx_buffer;
+			}
+		} else {
+			ret = sps_transfer_one(sys->ep->ep_hdl,
+				rx_pkt->data.dma_addr, sys->rx_buff_sz,
+				rx_pkt, 0);
+
+			if (ret) {
+				IPAERR("sps_transfer_one failed %d\n", ret);
+				goto fail_provide_rx_buffer;
+			}
+		}
+	}
+
+	return;
+
+fail_provide_rx_buffer:
+	list_del(&rx_pkt->link);
+	rx_len_cached = --sys->len;
+	dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	if (rx_len_cached == 0)
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+}
+
+static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_one;
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = sys->len;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		if (list_empty(&sys->rcycl_list)) {
+			rx_pkt = kmem_cache_zalloc(
+				ipa3_ctx->rx_pkt_wrapper_cache, flag);
+			if (!rx_pkt) {
+				IPAERR("failed to alloc rx wrapper\n");
+				goto fail_kmem_cache_alloc;
+			}
+
+			INIT_LIST_HEAD(&rx_pkt->link);
+			INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+			rx_pkt->sys = sys;
+
+			rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+			if (rx_pkt->data.skb == NULL) {
+				IPAERR("failed to alloc skb\n");
+				kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
+					rx_pkt);
+				goto fail_kmem_cache_alloc;
+			}
+			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+			if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+				IPAERR("dma_map_single failure %p for %p\n",
+					(void *)rx_pkt->data.dma_addr, ptr);
+				goto fail_dma_mapping;
+			}
+		} else {
+			spin_lock_bh(&sys->spinlock);
+			rx_pkt = list_first_entry(&sys->rcycl_list,
+				struct ipa3_rx_pkt_wrapper, link);
+			list_del(&rx_pkt->link);
+			spin_unlock_bh(&sys->spinlock);
+			INIT_LIST_HEAD(&rx_pkt->link);
+			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+			if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+				IPAERR("dma_map_single failure %p for %p\n",
+					(void *)rx_pkt->data.dma_addr, ptr);
+				goto fail_dma_mapping;
+			}
+		}
+
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+		if (ipa3_ctx->transport_prototype ==
+				IPA_TRANSPORT_TYPE_GSI) {
+			memset(&gsi_xfer_elem_one, 0,
+				sizeof(gsi_xfer_elem_one));
+			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+			gsi_xfer_elem_one.len = sys->rx_buff_sz;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+			gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
+					1, &gsi_xfer_elem_one, true);
+			if (ret != GSI_STATUS_SUCCESS) {
+				IPAERR("failed to provide buffer: %d\n",
+					ret);
+				goto fail_provide_rx_buffer;
+			}
+		} else {
+			ret = sps_transfer_one(sys->ep->ep_hdl,
+				rx_pkt->data.dma_addr, sys->rx_buff_sz,
+				rx_pkt, 0);
+
+			if (ret) {
+				IPAERR("sps_transfer_one failed %d\n", ret);
+				goto fail_provide_rx_buffer;
+			}
+		}
+	}
+
+	return;
+fail_provide_rx_buffer:
+	rx_len_cached = --sys->len;
+	list_del(&rx_pkt->link);
+	INIT_LIST_HEAD(&rx_pkt->link);
+	dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+		sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+	spin_lock_bh(&sys->spinlock);
+	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
+	INIT_LIST_HEAD(&rx_pkt->link);
+	spin_unlock_bh(&sys->spinlock);
+fail_kmem_cache_alloc:
+	if (rx_len_cached == 0)
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+		msecs_to_jiffies(1));
+}
+
+static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_one;
+	u32 curr;
+
+	rx_len_cached = sys->len;
+	curr = atomic_read(&sys->repl.head_idx);
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		if (curr == atomic_read(&sys->repl.tail_idx))
+			break;
+
+		rx_pkt = sys->repl.cache[curr];
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+
+		if (ipa3_ctx->transport_prototype ==
+				IPA_TRANSPORT_TYPE_GSI) {
+			memset(&gsi_xfer_elem_one, 0,
+				sizeof(gsi_xfer_elem_one));
+			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+			gsi_xfer_elem_one.len = sys->rx_buff_sz;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+			gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+				&gsi_xfer_elem_one, true);
+			if (ret != GSI_STATUS_SUCCESS) {
+				IPAERR("failed to provide buffer: %d\n",
+					ret);
+				break;
+			}
+		} else {
+			ret = sps_transfer_one(sys->ep->ep_hdl,
+				rx_pkt->data.dma_addr, sys->rx_buff_sz,
+				rx_pkt, 0);
+
+			if (ret) {
+				IPAERR("sps_transfer_one failed %d\n", ret);
+				list_del(&rx_pkt->link);
+				break;
+			}
+		}
+		rx_len_cached = ++sys->len;
+		curr = (curr + 1) % sys->repl.capacity;
+		/* ensure write is done before setting head index */
+		mb();
+		atomic_set(&sys->repl.head_idx, curr);
+	}
+
+	queue_work(sys->repl_wq, &sys->repl_work);
+
+	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
+		else
+			WARN_ON(1);
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+	}
+}
+
+static void ipa3_replenish_rx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa3_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work);
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	sys->repl_hdlr(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa3_cleanup_rx() - release RX queue resources
+ * @sys:	system pipe context whose RX resources are released
+ */
+static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct ipa3_rx_pkt_wrapper *r;
+	u32 head;
+	u32 tail;
+
+	list_for_each_entry_safe(rx_pkt, r,
+				 &sys->head_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+		sys->free_skb(rx_pkt->data.skb);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+
+	list_for_each_entry_safe(rx_pkt, r,
+				 &sys->rcycl_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+		sys->free_skb(rx_pkt->data.skb);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+
+	if (sys->repl.cache) {
+		head = atomic_read(&sys->repl.head_idx);
+		tail = atomic_read(&sys->repl.tail_idx);
+		while (head != tail) {
+			rx_pkt = sys->repl.cache[head];
+			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+					sys->rx_buff_sz, DMA_FROM_DEVICE);
+			sys->free_skb(rx_pkt->data.skb);
+			kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+			head = (head + 1) % sys->repl.capacity;
+		}
+		kfree(sys->repl.cache);
+	}
+}
+
+static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
+{
+	struct sk_buff *skb2 = NULL;
+
+	skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
+	if (likely(skb2)) {
+		/* Set the data pointer */
+		skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
+		memcpy(skb2->data, skb->data, len);
+		skb2->len = len;
+		skb_set_tail_pointer(skb2, len);
+	}
+
+	return skb2;
+}
+
+static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
+		struct ipa3_sys_context *sys)
+{
+	int rc = 0;
+	struct ipahal_pkt_status status;
+	u32 pkt_status_sz;
+	struct sk_buff *skb2;
+	int pad_len_byte;
+	int len;
+	unsigned char *buf;
+	int src_pipe;
+	unsigned int used = *(unsigned int *)skb->cb;
+	unsigned int used_align = ALIGN(used, 32);
+	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+	struct ipa3_tx_pkt_wrapper *tx_pkt = NULL;
+	unsigned long ptr;
+
+	IPA_DUMP_BUFF(skb->data, 0, skb->len);
+
+	if (skb->len == 0) {
+		IPAERR("ZLT\n");
+		return rc;
+	}
+
+	if (sys->len_partial) {
+		IPADBG_LOW("len_partial %d\n", sys->len_partial);
+		buf = skb_push(skb, sys->len_partial);
+		memcpy(buf, sys->prev_skb->data, sys->len_partial);
+		sys->len_partial = 0;
+		sys->free_skb(sys->prev_skb);
+		sys->prev_skb = NULL;
+		goto begin;
+	}
+
+	/* this pipe has TX comp (status only) + mux-ed LAN RX data
+	 * (status+data)
+	 */
+	if (sys->len_rem) {
+		IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
+				sys->len_pad);
+		if (sys->len_rem <= skb->len) {
+			if (sys->prev_skb) {
+				skb2 = skb_copy_expand(sys->prev_skb, 0,
+						sys->len_rem, GFP_KERNEL);
+				if (likely(skb2)) {
+					memcpy(skb_put(skb2, sys->len_rem),
+						skb->data, sys->len_rem);
+					skb_trim(skb2,
+						skb2->len - sys->len_pad);
+					skb2->truesize = skb2->len +
+						sizeof(struct sk_buff);
+					if (sys->drop_packet)
+						dev_kfree_skb_any(skb2);
+					else
+						sys->ep->client_notify(
+							sys->ep->priv,
+							IPA_RECEIVE,
+							(unsigned long)(skb2));
+				} else {
+					IPAERR("copy expand failed\n");
+				}
+				dev_kfree_skb_any(sys->prev_skb);
+			}
+			skb_pull(skb, sys->len_rem);
+			sys->prev_skb = NULL;
+			sys->len_rem = 0;
+			sys->len_pad = 0;
+		} else {
+			if (sys->prev_skb) {
+				skb2 = skb_copy_expand(sys->prev_skb, 0,
+					skb->len, GFP_KERNEL);
+				if (likely(skb2)) {
+					memcpy(skb_put(skb2, skb->len),
+						skb->data, skb->len);
+				} else {
+					IPAERR("copy expand failed\n");
+				}
+				dev_kfree_skb_any(sys->prev_skb);
+				sys->prev_skb = skb2;
+			}
+			sys->len_rem -= skb->len;
+			return rc;
+		}
+	}
+
+begin:
+	pkt_status_sz = ipahal_pkt_status_get_size();
+	while (skb->len) {
+		sys->drop_packet = false;
+		IPADBG_LOW("LEN_REM %d\n", skb->len);
+
+		if (skb->len < pkt_status_sz) {
+			WARN_ON(sys->prev_skb != NULL);
+			IPADBG_LOW("status straddles buffer\n");
+			sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+			sys->len_partial = skb->len;
+			return rc;
+		}
+
+		ipahal_pkt_status_parse(skb->data, &status);
+		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.pkt_len);
+		if (sys->status_stat) {
+			sys->status_stat->status[sys->status_stat->curr] =
+				status;
+			sys->status_stat->curr++;
+			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+				sys->status_stat->curr = 0;
+		}
+
+		if ((status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
+			IPAERR("unsupported opcode(%d)\n",
+				status.status_opcode);
+			skb_pull(skb, pkt_status_sz);
+			continue;
+		}
+		IPA_STATS_EXCP_CNT(status.exception,
+				ipa3_ctx->stats.rx_excp_pkts);
+		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
+			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
+			IPAERR("status fields invalid\n");
+			IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.pkt_len);
+			WARN_ON(1);
+			BUG();
+		}
+		if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+			IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) {
+			struct ipa3_tag_completion *comp;
+
+			IPADBG_LOW("TAG packet arrived\n");
+			if (status.tag_info == IPA_COOKIE) {
+				skb_pull(skb, pkt_status_sz);
+				if (skb->len < sizeof(comp)) {
+					IPAERR("TAG arrived without packet\n");
+					return rc;
+				}
+				memcpy(&comp, skb->data, sizeof(comp));
+				skb_pull(skb, sizeof(comp) +
+						IPA_SIZE_DL_CSUM_META_TRAILER);
+				complete(&comp->comp);
+				if (atomic_dec_return(&comp->cnt) == 0)
+					kfree(comp);
+				continue;
+			} else {
+				ptr = tag_to_pointer_wa(status.tag_info);
+				tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr;
+				IPADBG_LOW("tx_pkt recv = %p\n", tx_pkt);
+			}
+		}
+		if (status.pkt_len == 0) {
+			IPADBG_LOW("Skip aggr close status\n");
+			skb_pull(skb, pkt_status_sz);
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close);
+			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
+				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
+			continue;
+		}
+
+		if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) {
+			/* RX data */
+			src_pipe = status.endp_src_idx;
+
+			/*
+			 * A packet which is routed back to the AP because
+			 * no routing rule was matched.
+			 */
+			if (status.exception ==
+				IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
+				ipahal_is_rule_miss_id(status.rt_rule_id))
+				sys->drop_packet = true;
+
+			if (skb->len == pkt_status_sz &&
+				status.exception ==
+				IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
+				WARN_ON(sys->prev_skb != NULL);
+				IPADBG_LOW("Ins header in next buffer\n");
+				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+				sys->len_partial = skb->len;
+				return rc;
+			}
+
+			pad_len_byte = ((status.pkt_len + 3) & ~3) -
+					status.pkt_len;
+
+			len = status.pkt_len + pad_len_byte +
+				IPA_SIZE_DL_CSUM_META_TRAILER;
+			IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
+					status.pkt_len, len);
+
+			if (status.exception ==
+					IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) {
+				IPADBG_LOW(
+					"Dropping packet on DeAggr Exception\n");
+				sys->drop_packet = true;
+			}
+
+			skb2 = ipa3_skb_copy_for_client(skb,
+				min(status.pkt_len + pkt_status_sz, skb->len));
+			if (likely(skb2)) {
+				if (skb->len < len + pkt_status_sz) {
+					IPADBG_LOW("SPL skb len %d len %d\n",
+							skb->len, len);
+					sys->prev_skb = skb2;
+					sys->len_rem = len - skb->len +
+						pkt_status_sz;
+					sys->len_pad = pad_len_byte;
+					skb_pull(skb, skb->len);
+				} else {
+					skb_trim(skb2, status.pkt_len +
+							pkt_status_sz);
+					IPADBG_LOW("rx avail for %d\n",
+							status.endp_dest_idx);
+					if (sys->drop_packet) {
+						dev_kfree_skb_any(skb2);
+					} else if (status.pkt_len >
+						   IPA_GENERIC_AGGR_BYTE_LIMIT *
+						   1024) {
+						IPAERR("packet size invalid\n");
+						IPAERR("STATUS opcode=%d\n",
+							status.status_opcode);
+						IPAERR("src=%d dst=%d len=%d\n",
+							status.endp_src_idx,
+							status.endp_dest_idx,
+							status.pkt_len);
+						BUG();
+					} else {
+					skb2->truesize = skb2->len +
+						sizeof(struct sk_buff) +
+						(ALIGN(len +
+						pkt_status_sz, 32) *
+						unused / used_align);
+						sys->ep->client_notify(
+							sys->ep->priv,
+							IPA_RECEIVE,
+							(unsigned long)(skb2));
+					}
+					skb_pull(skb, len + pkt_status_sz);
+				}
+			} else {
+				IPAERR("fail to alloc skb\n");
+				if (skb->len < len) {
+					sys->prev_skb = NULL;
+					sys->len_rem = len - skb->len +
+						pkt_status_sz;
+					sys->len_pad = pad_len_byte;
+					skb_pull(skb, skb->len);
+				} else {
+					skb_pull(skb, len + pkt_status_sz);
+				}
+			}
+			/* TX comp */
+			ipa3_wq_write_done_status(src_pipe, tx_pkt);
+			IPADBG_LOW("tx comp imp for %d\n", src_pipe);
+		} else {
+			/* TX comp */
+			ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt);
+			IPADBG_LOW("tx comp exp for %d\n",
+				status.endp_src_idx);
+			skb_pull(skb, pkt_status_sz);
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl);
+			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
+				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
+		}
+	}
+
+	return rc;
+}
+
+static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb,
+		struct sk_buff *skb, unsigned int len)
+{
+	struct sk_buff *skb2;
+
+	skb2 = skb_copy_expand(prev_skb, 0,
+			len, GFP_KERNEL);
+	if (likely(skb2)) {
+		memcpy(skb_put(skb2, len),
+			skb->data, len);
+	} else {
+		IPAERR("copy expand failed\n");
+		skb2 = NULL;
+	}
+	dev_kfree_skb_any(prev_skb);
+
+	return skb2;
+}
+
+static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
+		struct ipa3_sys_context *sys)
+{
+	struct sk_buff *skb2;
+
+	IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len);
+	if (sys->len_rem <= skb->len) {
+		if (sys->prev_skb) {
+			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
+					sys->len_rem);
+			if (likely(skb2)) {
+				IPADBG_LOW(
+					"removing Status element from skb and sending to WAN client");
+				skb_pull(skb2, ipahal_pkt_status_get_size());
+				skb2->truesize = skb2->len +
+					sizeof(struct sk_buff);
+				sys->ep->client_notify(sys->ep->priv,
+					IPA_RECEIVE,
+					(unsigned long)(skb2));
+			}
+		}
+		skb_pull(skb, sys->len_rem);
+		sys->prev_skb = NULL;
+		sys->len_rem = 0;
+	} else {
+		if (sys->prev_skb) {
+			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
+					skb->len);
+			sys->prev_skb = skb2;
+		}
+		sys->len_rem -= skb->len;
+		skb_pull(skb, skb->len);
+	}
+}
+
+static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
+		struct ipa3_sys_context *sys)
+{
+	int rc = 0;
+	struct ipahal_pkt_status status;
+	unsigned char *skb_data;
+	u32 pkt_status_sz;
+	struct sk_buff *skb2;
+	u16 pkt_len_with_pad;
+	u32 qmap_hdr;
+	int checksum_trailer_exists;
+	int frame_len;
+	int ep_idx;
+	unsigned int used = *(unsigned int *)skb->cb;
+	unsigned int used_align = ALIGN(used, 32);
+	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+
+	IPA_DUMP_BUFF(skb->data, 0, skb->len);
+	if (skb->len == 0) {
+		IPAERR("ZLT\n");
+		goto bail;
+	}
+
+	if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
+		sys->ep->client_notify(sys->ep->priv,
+			IPA_RECEIVE, (unsigned long)(skb));
+		return rc;
+	}
+	if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
+		IPAERR("Recycle should enable only with GRO Aggr\n");
+		ipa_assert();
+	}
+
+	/*
+	 * payload is split across two or more buffers,
+	 * take the start of the payload from prev_skb
+	 */
+	if (sys->len_rem)
+		ipa3_wan_rx_handle_splt_pyld(skb, sys);
+
+	pkt_status_sz = ipahal_pkt_status_get_size();
+	while (skb->len) {
+		IPADBG_LOW("LEN_REM %d\n", skb->len);
+		if (skb->len < pkt_status_sz) {
+			IPAERR("status straddles buffer\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		ipahal_pkt_status_parse(skb->data, &status);
+		skb_data = skb->data;
+		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.pkt_len);
+
+		if (sys->status_stat) {
+			sys->status_stat->status[sys->status_stat->curr] =
+				status;
+			sys->status_stat->curr++;
+			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+				sys->status_stat->curr = 0;
+		}
+
+		if ((status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
+			IPAERR("unsupported opcode(%d)\n",
+				status.status_opcode);
+			skb_pull(skb, pkt_status_sz);
+			continue;
+		}
+
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
+		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
+			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes ||
+			status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
+			IPAERR("status fields invalid\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		if (status.pkt_len == 0) {
+			IPADBG_LOW("Skip aggr close status\n");
+			skb_pull(skb, pkt_status_sz);
+			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts);
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
+			continue;
+		}
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+		if (status.endp_dest_idx != ep_idx) {
+			IPAERR("expected endp_dest_idx %d received %d\n",
+					ep_idx, status.endp_dest_idx);
+			WARN_ON(1);
+			goto bail;
+		}
+		/* RX data */
+		if (skb->len == pkt_status_sz) {
+			IPAERR("Ins header in next buffer\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		qmap_hdr = *(u32 *)(skb_data + pkt_status_sz);
+		/*
+		 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
+		 * header
+		 */
+
+		/* QMAP is BE: convert the pkt_len field from BE to LE */
+		pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
+		IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad);
+		/* get the CHECKSUM_PROCESS bit */
+		checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+			IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status);
+		IPADBG_LOW("checksum_trailer_exists %d\n",
+				checksum_trailer_exists);
+
+		frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH +
+			    pkt_len_with_pad;
+		if (checksum_trailer_exists)
+			frame_len += IPA_DL_CHECKSUM_LENGTH;
+		IPADBG_LOW("frame_len %d\n", frame_len);
+
+		skb2 = skb_clone(skb, GFP_KERNEL);
+		if (likely(skb2)) {
+			/*
+			 * the actual data length is smaller than expected,
+			 * so the payload is split across two buffers
+			 */
+			if (skb->len < frame_len) {
+				IPADBG_LOW("SPL skb len %d len %d\n",
+						skb->len, frame_len);
+				sys->prev_skb = skb2;
+				sys->len_rem = frame_len - skb->len;
+				skb_pull(skb, skb->len);
+			} else {
+				skb_trim(skb2, frame_len);
+				IPADBG_LOW("rx avail for %d\n",
+						status.endp_dest_idx);
+				IPADBG_LOW(
+					"removing Status element from skb and sending to WAN client");
+				skb_pull(skb2, pkt_status_sz);
+				skb2->truesize = skb2->len +
+					sizeof(struct sk_buff) +
+					(ALIGN(frame_len, 32) *
+					 unused / used_align);
+				sys->ep->client_notify(sys->ep->priv,
+					IPA_RECEIVE, (unsigned long)(skb2));
+				skb_pull(skb, frame_len);
+			}
+		} else {
+			IPAERR("fail to clone\n");
+			if (skb->len < frame_len) {
+				sys->prev_skb = NULL;
+				sys->len_rem = frame_len - skb->len;
+				skb_pull(skb, skb->len);
+			} else {
+				skb_pull(skb, frame_len);
+			}
+		}
+	}
+bail:
+	sys->free_skb(skb);
+	return rc;
+}
+
+static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags)
+{
+	return __dev_alloc_skb(len, flags);
+}
+
+static void ipa3_free_skb_rx(struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+}
+
+void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
+{
+	struct sk_buff *rx_skb = (struct sk_buff *)data;
+	struct ipahal_pkt_status status;
+	struct ipa3_ep_context *ep;
+	unsigned int src_pipe;
+	u32 metadata;
+
+	ipahal_pkt_status_parse(rx_skb->data, &status);
+	src_pipe = status.endp_src_idx;
+	metadata = status.metadata;
+	ep = &ipa3_ctx->ep[src_pipe];
+	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
+		!ep->valid ||
+		!ep->client_notify)) {
+		IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
+		  src_pipe, ep->valid, ep->client_notify);
+		dev_kfree_skb_any(rx_skb);
+		return;
+	}
+	if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
+		skb_pull(rx_skb, ipahal_pkt_status_get_size() +
+				IPA_LAN_RX_HEADER_LENGTH);
+	else
+		skb_pull(rx_skb, ipahal_pkt_status_get_size());
+
+	/* Metadata Info
+	 *  ------------------------------------------
+	 *  |   3     |   2     |    1        |  0   |
+	 *  | fw_desc | vdev_id | qmap mux id | Resv |
+	 *  ------------------------------------------
+	 */
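+	/*
+	 * Worked example (illustrative value only): metadata == 0xAABBCC00
+	 * gives fw_desc = 0xAA, vdev_id = 0xBB and qmap mux id = 0xCC; the
+	 * line below stores bytes 3..2 (0xAABB) in the first two bytes of
+	 * rx_skb->cb for the client to consume.
+	 */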
+	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+	IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
+			metadata, *(u32 *)rx_skb->cb);
+
+	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+}
+
+static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+	rx_pkt->data.dma_addr = 0;
+	ipa3_skb_recycle(rx_pkt->data.skb);
+	INIT_LIST_HEAD(&rx_pkt->link);
+	spin_lock_bh(&rx_pkt->sys->spinlock);
+	list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
+	spin_unlock_bh(&rx_pkt->sys->spinlock);
+}
+
+void ipa3_recycle_wan_skb(struct sk_buff *skb)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ep_idx = ipa3_get_ep_mapping(
+	   IPA_CLIENT_APPS_WAN_CONS);
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	if (unlikely(ep_idx == -1)) {
+		IPAERR("dest EP does not exist\n");
+		ipa_assert();
+	}
+
+	rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					flag);
+	if (!rx_pkt)
+		ipa_assert();
+
+	INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+	rx_pkt->sys = ipa3_ctx->ep[ep_idx].sys;
+
+	rx_pkt->data.skb = skb;
+	ipa3_recycle_rx_wrapper(rx_pkt);
+}
+
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
+	struct sk_buff *rx_skb;
+
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		WARN_ON(1);
+		return;
+	}
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+					   struct ipa3_rx_pkt_wrapper,
+					   link);
+	list_del(&rx_pkt_expected->link);
+	sys->len--;
+	if (size)
+		rx_pkt_expected->len = size;
+	rx_skb = rx_pkt_expected->data.skb;
+	dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+	rx_skb->len = rx_pkt_expected->len;
+	*(unsigned int *)rx_skb->cb = rx_skb->len;
+	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+	sys->pyld_hdlr(rx_skb, sys);
+	sys->free_rx_wrapper(rx_pkt_expected);
+	sys->repl_hdlr(sys);
+}
+
+static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
+	struct sk_buff *rx_skb;
+
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		WARN_ON(1);
+		return;
+	}
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+					   struct ipa3_rx_pkt_wrapper,
+					   link);
+	list_del(&rx_pkt_expected->link);
+	sys->len--;
+
+	if (size)
+		rx_pkt_expected->len = size;
+
+	rx_skb = rx_pkt_expected->data.skb;
+	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+	rx_skb->len = rx_pkt_expected->len;
+	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+	sys->ep->wstats.tx_pkts_rcvd++;
+	if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
+		ipa3_free_skb(&rx_pkt_expected->data);
+		sys->ep->wstats.tx_pkts_dropped++;
+	} else {
+		sys->ep->wstats.tx_pkts_sent++;
+		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+				(unsigned long)(&rx_pkt_expected->data));
+	}
+	ipa3_replenish_wlan_rx_cache(sys);
+}
+
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
+	struct ipa_mem_buffer *mem_info)
+{
+	IPADBG_LOW("ENTER.\n");
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		IPAERR("descriptor list is empty!\n");
+		WARN_ON(1);
+		return;
+	}
+	sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+				(unsigned long)(mem_info));
+	IPADBG_LOW("EXIT\n");
+}
+
+static void ipa3_wq_rx_avail(struct work_struct *work)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct ipa3_sys_context *sys;
+
+	rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work);
+	if (unlikely(rx_pkt == NULL))
+		WARN_ON(1);
+	sys = rx_pkt->sys;
+	ipa3_wq_rx_common(sys, 0);
+}
+
+/**
+ * ipa3_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Rx operation is complete.
+ * Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to a workqueue.
+ */
+void ipa3_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		rx_pkt = notify->data.transfer.user;
+		if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
+			atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		rx_pkt->len = notify->data.transfer.iovec.size;
+		IPADBG_LOW("event %d notified sys=%p len=%u\n",
+				notify->event_id,
+				notify->user, rx_pkt->len);
+		queue_work(rx_pkt->sys->wq, &rx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d sys=%p\n",
+				notify->event_id, notify->user);
+	}
+}
+
+static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
+	struct ipa3_sys_context *sys)
+{
+	if (sys->ep->client_notify) {
+		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+			(unsigned long)(rx_skb));
+	} else {
+		dev_kfree_skb_any(rx_skb);
+		WARN_ON(1);
+	}
+
+	return 0;
+}
+
+static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+}
+
+static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
+		struct ipa3_sys_context *sys)
+{
+	if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
+		sys->policy = IPA_POLICY_INTR_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
+		sys->sps_callback = ipa3_sps_irq_tx_no_aggr_notify;
+		return 0;
+	}
+
+	if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) {
+		sys->policy = IPA_POLICY_NOINTR_MODE;
+		sys->sps_option = SPS_O_AUTO_ENABLE;
+		sys->sps_callback = NULL;
+		return 0;
+	}
+
+	if (IPA_CLIENT_IS_PROD(in->client)) {
+		if (sys->ep->skip_ep_cfg) {
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE|
+				SPS_O_EOT | SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_tx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_tx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_tx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+		} else {
+			sys->policy = IPA_POLICY_NOINTR_MODE;
+			sys->sps_option = SPS_O_AUTO_ENABLE;
+			sys->sps_callback = NULL;
+			sys->ep->status.status_en = true;
+			sys->ep->status.status_ep = ipa3_get_ep_mapping(
+					IPA_CLIENT_APPS_LAN_CONS);
+		}
+	} else {
+		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
+		    in->client == IPA_CLIENT_APPS_WAN_CONS) {
+			sys->ep->status.status_en = true;
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+					| SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_rx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+					ipa3_replenish_rx_work_func);
+			INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+				IPA_GENERIC_RX_BUFF_BASE_SZ);
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+			in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+			in->ipa_ep_cfg.aggr.aggr_time_limit =
+				IPA_GENERIC_AGGR_TIME_LIMIT;
+			if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+				sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
+				sys->repl_hdlr =
+					ipa3_replenish_rx_cache_recycle;
+				sys->free_rx_wrapper =
+					ipa3_recycle_rx_wrapper;
+				sys->rx_pool_sz =
+					ipa3_ctx->lan_rx_ring_size;
+				in->ipa_ep_cfg.aggr.aggr_byte_limit =
+				IPA_GENERIC_AGGR_BYTE_LIMIT;
+				in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+				IPA_GENERIC_AGGR_PKT_LIMIT;
+			} else if (in->client ==
+					IPA_CLIENT_APPS_WAN_CONS) {
+				sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
+				sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+				if (in->napi_enabled) {
+					sys->repl_hdlr =
+					   ipa3_replenish_rx_cache_recycle;
+					sys->rx_pool_sz =
+					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+				} else {
+					if (nr_cpu_ids > 1) {
+						sys->repl_hdlr =
+						   ipa3_fast_replenish_rx_cache;
+					} else {
+						sys->repl_hdlr =
+						   ipa3_replenish_rx_cache;
+					}
+					sys->rx_pool_sz =
+					   ipa3_ctx->wan_rx_ring_size;
+				}
+				in->ipa_ep_cfg.aggr.aggr_sw_eof_active
+					= true;
+				if (ipa3_ctx->
+				ipa_client_apps_wan_cons_agg_gro) {
+					IPAERR("get close-by %u\n",
+					ipa_adjust_ra_buff_base_sz(
+					in->ipa_ep_cfg.aggr.
+					aggr_byte_limit));
+					IPAERR("set rx_buff_sz %lu\n",
+					(unsigned long int)
+					IPA_GENERIC_RX_BUFF_SZ(
+					ipa_adjust_ra_buff_base_sz(
+					in->ipa_ep_cfg.
+						aggr.aggr_byte_limit)));
+					/* disable ipa_status */
+					sys->ep->status.
+						status_en = false;
+					sys->rx_buff_sz =
+					IPA_GENERIC_RX_BUFF_SZ(
+					ipa_adjust_ra_buff_base_sz(
+					in->ipa_ep_cfg.aggr.
+						aggr_byte_limit));
+					in->ipa_ep_cfg.aggr.
+						aggr_byte_limit =
+					sys->rx_buff_sz < in->
+					ipa_ep_cfg.aggr.
+					aggr_byte_limit ?
+					IPA_ADJUST_AGGR_BYTE_LIMIT(
+					sys->rx_buff_sz) :
+					IPA_ADJUST_AGGR_BYTE_LIMIT(
+					in->ipa_ep_cfg.
+					aggr.aggr_byte_limit);
+					IPAERR("set aggr_limit %lu\n",
+					(unsigned long int)
+					in->ipa_ep_cfg.aggr.
+					aggr_byte_limit);
+				} else {
+					in->ipa_ep_cfg.aggr.
+						aggr_byte_limit =
+					IPA_GENERIC_AGGR_BYTE_LIMIT;
+					in->ipa_ep_cfg.aggr.
+						aggr_pkt_limit =
+					IPA_GENERIC_AGGR_PKT_LIMIT;
+				}
+			}
+		} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+				| SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_rx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+				ipa3_replenish_rx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
+			sys->rx_pool_sz = in->desc_fifo_sz/
+				sizeof(struct sps_iovec) - 1;
+			if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
+				sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
+			sys->pyld_hdlr = NULL;
+			sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+			in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+		} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+				| SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_rx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+			ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+				ipa3_replenish_rx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+			sys->rx_pool_sz = in->desc_fifo_sz /
+				sizeof(struct sps_iovec) - 1;
+			if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
+				sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
+			sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+			sys->repl_hdlr = ipa3_replenish_rx_cache;
+		} else if (in->client ==
+				IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+					| SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_rx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+		} else if (in->client ==
+				IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_NOINTR_MODE;
+			sys->sps_option = SPS_O_AUTO_ENABLE |
+			SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		} else {
+			IPAERR("Need to install a RX pipe hdlr\n");
+			WARN_ON(1);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
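+
+/*
+ * Quick reference for ipa3_assign_policy() above (derived from the code):
+ * APPS_CMD_PROD and MEMCPY_DMA producers run without an RX handler;
+ * APPS_LAN_CONS gets ipa3_lan_rx_pyld_hdlr with the recycling replenisher;
+ * APPS_WAN_CONS gets ipa3_wan_rx_pyld_hdlr with the recycling replenisher
+ * when NAPI is enabled, otherwise the fast replenisher (on SMP) or the
+ * plain one; WLAN consumers use ipa3_replenish_wlan_rx_cache with no
+ * payload handler; ODU consumers use ipa3_odu_rx_pyld_hdlr with
+ * ipa3_replenish_rx_cache.
+ */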
+
+/**
+ * ipa3_tx_client_rx_notify_release() - Callback function
+ * which returns the descriptor to the available FIFO pool and
+ * calls the user supplied callback to release the data
+ * descriptor, if such a callback was supplied
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notify callback is for the destination client.
+ * This function is supplied in ipa3_tx_dp_mul.
+ */
+static void ipa3_tx_client_rx_notify_release(void *user1, int user2)
+{
+	struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
+	int ep_idx = user2;
+
+	IPADBG_LOW("Received data desc anchor:%p\n", dd);
+
+	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
+	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+
+	/* wlan host driver waits till tx complete before unload */
+	IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n",
+		ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc));
+	IPADBG_LOW("calling client notify callback with priv:%p\n",
+		ipa3_ctx->ep[ep_idx].priv);
+
+	if (ipa3_ctx->ep[ep_idx].client_notify) {
+		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)user1);
+		ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++;
+	}
+}
+
+/**
+ * ipa3_tx_client_rx_pkt_status() - Callback function which
+ * increases the count of available fifo descriptors and updates
+ * the endpoint's rx statistics
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notify callback is for the destination client.
+ * This function is supplied in ipa3_tx_dp_mul().
+ */
+static void ipa3_tx_client_rx_pkt_status(void *user1, int user2)
+{
+	int ep_idx = user2;
+
+	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
+	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+}
+
+
+/**
+ * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @ipa_tx_data_desc:	[in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received from the
+ * WLAN1_PROD pipe to the IPA HW.
+ *
+ * The function sends the data descriptors from WLAN1_PROD (one
+ * at a time) using sps_transfer_one and sets the EOT flag for the
+ * last descriptor. Once the send is done from the SPS point of
+ * view, the IPA driver is notified by the supplied callback -
+ * ipa3_sps_irq_tx_no_aggr_notify()
+ *
+ * ipa3_sps_irq_tx_no_aggr_notify will call the user-supplied
+ * callback (from ipa3_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_tx_dp_mul(enum ipa_client_type src,
+			struct ipa_tx_data_desc *data_desc)
+{
+	/* The second byte in wlan header holds qmap id */
+#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
+	struct ipa_tx_data_desc *entry;
+	struct ipa3_sys_context *sys;
+	struct ipa3_desc desc[2];
+	u32 num_desc, cnt;
+	int ep_idx;
+
+	IPADBG_LOW("Received data desc anchor:%p\n", data_desc);
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+
+	ep_idx = ipa3_get_ep_mapping(src);
+	if (unlikely(ep_idx == -1)) {
+		IPAERR("dest EP does not exist.\n");
+		goto fail_send;
+	}
+	IPADBG_LOW("ep idx:%d\n", ep_idx);
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) {
+		IPAERR("dest EP not valid.\n");
+		goto fail_send;
+	}
+	sys->ep->wstats.rx_hd_rcvd++;
+
+	/* Calculate the number of descriptors */
+	num_desc = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		num_desc++;
+	}
+	IPADBG_LOW("Number of Data Descriptors:%d", num_desc);
+
+	if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
+		IPAERR("Insufficient data descriptors available\n");
+		goto fail_send;
+	}
+
+	/* Assign callback only for last data descriptor */
+	cnt = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		memset(desc, 0, 2 * sizeof(struct ipa3_desc));
+
+		IPADBG_LOW("Parsing data desc :%d\n", cnt);
+		cnt++;
+		((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
+			(u8)sys->ep->cfg.meta.qmap_id;
+
+		/* the tag field will be populated in ipa3_send() function */
+		desc[0].opcode =
+			ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		desc[0].callback = ipa3_tag_destroy_imm;
+		desc[1].pyld = entry->pyld_buffer;
+		desc[1].len = entry->pyld_len;
+		desc[1].type = IPA_DATA_DESC_SKB;
+		desc[1].user1 = data_desc;
+		desc[1].user2 = ep_idx;
+		IPADBG_LOW("priv:%p pyld_buf:0x%p pyld_len:%d\n",
+			entry->priv, desc[1].pyld, desc[1].len);
+
+		/* In case of last descriptor populate callback */
+		if (cnt == num_desc) {
+			IPADBG_LOW("data desc:%p\n", data_desc);
+			desc[1].callback = ipa3_tx_client_rx_notify_release;
+		} else {
+			desc[1].callback = ipa3_tx_client_rx_pkt_status;
+		}
+
+		IPADBG_LOW("calling ipa3_send()\n");
+		if (ipa3_send(sys, 2, desc, true)) {
+			IPAERR("fail to send skb\n");
+			sys->ep->wstats.rx_pkt_leak += (cnt-1);
+			sys->ep->wstats.rx_dp_fail++;
+			goto fail_send;
+		}
+
+		if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
+			atomic_dec(&sys->ep->avail_fifo_desc);
+
+		sys->ep->wstats.rx_pkts_rcvd++;
+		IPADBG_LOW("ep=%d fifo desc=%d\n",
+			ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
+	}
+
+	sys->ep->wstats.rx_hd_processed++;
+	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return 0;
+
+fail_send:
+	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return -EFAULT;
+
+}
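+
+/*
+ * Note (illustrative): every WLAN data descriptor above is submitted as
+ * a pair of IPA descriptors - desc[0] carries an IP_PACKET_TAG_STATUS
+ * immediate command (its tag is filled in by ipa3_send()) and desc[1]
+ * carries the payload. Only the last pair in the anchor list gets the
+ * release callback; the earlier ones only update the fifo accounting
+ * via ipa3_tx_client_rx_pkt_status().
+ */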
+
+void ipa3_free_skb(struct ipa_rx_data *data)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+	ipa3_ctx->wc_memb.total_tx_pkts_freed++;
+	rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data);
+
+	ipa3_skb_recycle(rx_pkt->data.skb);
+	(void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+
+	list_add_tail(&rx_pkt->link,
+		&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+	ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+}
+
+/* Functions added to support kernel tests */
+
+int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
+			unsigned long *ipa_bam_or_gsi_hdl,
+			u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+
+	if (sys_in == NULL || clnt_hdl == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+
+	if (ipa_bam_or_gsi_hdl == NULL || ipa_pipe_num == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+	if (sys_in->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm client:%d\n", sys_in->client);
+		goto fail_gen;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client :%d\n", sys_in->client);
+		goto fail_gen;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+	if (ep->valid == 1) {
+		if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+			IPAERR("EP %d already allocated\n", ipa_ep_idx);
+			goto fail_and_disable_clocks;
+		} else {
+			if (ipa3_cfg_ep_hdr(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.hdr)) {
+				IPAERR("fail to configure hdr prop of EP %d\n",
+						ipa_ep_idx);
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			if (ipa3_cfg_ep_cfg(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.cfg)) {
+				IPAERR("fail to configure cfg prop of EP %d\n",
+						ipa_ep_idx);
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
+					sys_in->client, ipa_ep_idx, ep->sys);
+			ep->client_notify = sys_in->notify;
+			ep->priv = sys_in->priv;
+			*clnt_hdl = ipa_ep_idx;
+			if (!ep->keep_ipa_awake)
+				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+			return 0;
+		}
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+	ep->valid = 1;
+	ep->client = sys_in->client;
+	ep->client_notify = sys_in->notify;
+	ep->priv = sys_in->priv;
+	ep->keep_ipa_awake = true;
+	if (en_status) {
+		ep->status.status_en = true;
+		ep->status.status_ep = ipa_ep_idx;
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n",
+				 result, ipa_ep_idx);
+		goto fail_gen2;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_gen2;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_gen2;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("skipping ep configuration\n");
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	*ipa_pipe_num = ipa_ep_idx;
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+		*ipa_bam_or_gsi_hdl = ipa3_ctx->gsi_dev_hdl;
+	else
+		*ipa_bam_or_gsi_hdl = ipa3_ctx->bam_handle;
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+			ipa_ep_idx, ep->sys);
+
+	return 0;
+
+fail_gen2:
+fail_and_disable_clocks:
+	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+	return result;
+}
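+
+/*
+ * Illustrative usage sketch for the kernel-test hooks above (not part
+ * of the driver; the client and FIFO size are example values only):
+ *
+ *	struct ipa_sys_connect_params sys_in;
+ *	unsigned long bam_or_gsi_hdl;
+ *	u32 pipe_num, clnt_hdl;
+ *
+ *	memset(&sys_in, 0, sizeof(sys_in));
+ *	sys_in.client = IPA_CLIENT_TEST_CONS;	(example client)
+ *	sys_in.desc_fifo_sz = 0x800;		(example FIFO size)
+ *	if (!ipa3_sys_setup(&sys_in, &bam_or_gsi_hdl, &pipe_num,
+ *			&clnt_hdl, false)) {
+ *		... use clnt_hdl ...
+ *		ipa3_sys_teardown(clnt_hdl);
+ *	}
+ */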
+
+int ipa3_sys_teardown(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm (either endpoint or client hdl invalid)\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_disable_data_path(clnt_hdl);
+	ep->valid = 0;
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm (either endpoint or client hdl invalid)\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	ep->gsi_chan_hdl = gsi_ch_hdl;
+	ep->gsi_evt_ring_hdl = gsi_ev_hdl;
+
+	return 0;
+}
+
+static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_EVT_EOT:
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		tx_pkt = notify->xfer_user_data;
+		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
+
+	if (!notify) {
+		IPAERR("gsi notify is NULL.\n");
+		return;
+	}
+	IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+	sys = (struct ipa3_sys_context *)notify->chan_user_data;
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+					   struct ipa3_rx_pkt_wrapper, link);
+	rx_pkt_rcvd = (struct ipa3_rx_pkt_wrapper *)notify->xfer_user_data;
+
+	if (rx_pkt_expected != rx_pkt_rcvd) {
+		IPAERR("Pkt was not filled in head of rx buffer.\n");
+		WARN_ON(1);
+		return;
+	}
+	sys->ep->bytes_xfered_valid = true;
+	sys->ep->bytes_xfered = notify->bytes_xfered;
+	sys->ep->phys_base = rx_pkt_rcvd->data.dma_addr;
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_EVT_EOT:
+	case GSI_CHAN_EVT_EOB:
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&sys->curr_polling_state)) {
+			/* put the gsi channel into polling mode */
+			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_POLL);
+			ipa3_inc_acquire_wakelock();
+			atomic_set(&sys->curr_polling_state, 1);
+			queue_work(sys->wq, &sys->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_dma_xfer_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
+
+	if (!notify) {
+		IPAERR("gsi notify is NULL.\n");
+		return;
+	}
+	IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+	sys = (struct ipa3_sys_context *)notify->chan_user_data;
+	if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+		IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n");
+		return;
+	}
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+		struct ipa3_dma_xfer_wrapper, link);
+	rx_pkt_rcvd = (struct ipa3_dma_xfer_wrapper *)notify->xfer_user_data;
+	if (rx_pkt_expected != rx_pkt_rcvd) {
+		IPAERR("Pkt was not filled in head of rx buffer.\n");
+		WARN_ON(1);
+		return;
+	}
+
+	sys->ep->bytes_xfered_valid = true;
+	sys->ep->bytes_xfered = notify->bytes_xfered;
+	sys->ep->phys_base = rx_pkt_rcvd->phys_addr_dest;
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_EVT_EOT:
+		if (!atomic_read(&sys->curr_polling_state)) {
+			/* put the gsi channel into polling mode */
+			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_POLL);
+			ipa3_inc_acquire_wakelock();
+			atomic_set(&sys->curr_polling_state, 1);
+			queue_work(sys->wq, &sys->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->evt_id);
+	}
+}
+
+static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
+	struct ipa3_ep_context *ep)
+{
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	struct gsi_chan_props gsi_channel_props;
+	union __packed gsi_channel_scratch ch_scratch;
+	struct ipa_gsi_ep_config *gsi_ep_info;
+	dma_addr_t dma_addr;
+	int result;
+
+	if (!ep) {
+		IPAERR("EP context is empty\n");
+		return -EINVAL;
+	}
+
+	ep->gsi_evt_ring_hdl = ~0;
+	/*
+	 * allocate event ring for all interrupt-policy
+	 * pipes and IPA consumer pipes
+	 */
+	if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
+	     IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+		gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
+		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+		gsi_evt_ring_props.re_size =
+			GSI_EVT_RING_RE_SIZE_16B;
+
+		gsi_evt_ring_props.ring_len = IPA_GSI_EVT_RING_LEN;
+		gsi_evt_ring_props.ring_base_vaddr =
+			dma_alloc_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
+			&dma_addr, 0);
+		gsi_evt_ring_props.ring_base_addr = dma_addr;
+
+		/* copy mem info */
+		ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
+		ep->gsi_mem_info.evt_ring_base_addr =
+			gsi_evt_ring_props.ring_base_addr;
+		ep->gsi_mem_info.evt_ring_base_vaddr =
+			gsi_evt_ring_props.ring_base_vaddr;
+
+		gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
+		gsi_evt_ring_props.int_modc = 1;
+		gsi_evt_ring_props.rp_update_addr = 0;
+		gsi_evt_ring_props.exclusive = true;
+		gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
+		gsi_evt_ring_props.user_data = NULL;
+
+		result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+			ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl);
+		if (result != GSI_STATUS_SUCCESS)
+			goto fail_alloc_evt_ring;
+	}
+
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+	} else {
+		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+		gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
+	}
+
+	gsi_ep_info = ipa3_get_gsi_ep_info(ipa3_get_ep_mapping(ep->client));
+	if (!gsi_ep_info) {
+		IPAERR("Invalid ep number\n");
+		result = -EINVAL;
+		goto fail_alloc_evt_ring;
+	} else
+		gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
+
+	gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+
+	/*
+	 * GSI ring length is calculated based on the desc_fifo_sz which was
+	 * meant to define the BAM desc fifo. GSI descriptors are 16B as opposed
+	 * to 8B for BAM. For PROD pipes there is also an additional descriptor
+	 * for TAG STATUS immediate command.
+	 */
+	if (IPA_CLIENT_IS_PROD(ep->client))
+		gsi_channel_props.ring_len = 4 * in->desc_fifo_sz;
+	else
+		gsi_channel_props.ring_len = 2 * in->desc_fifo_sz;
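+	/*
+	 * Example (illustrative): with desc_fifo_sz = 0x800 the GSI ring
+	 * is 0x2000 bytes for producers (4x: 16B REs vs 8B BAM iovecs,
+	 * plus room for the TAG STATUS descriptor) and 0x1000 bytes for
+	 * consumers (2x).
+	 */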
+	gsi_channel_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+			&dma_addr, 0);
+	gsi_channel_props.ring_base_addr = dma_addr;
+
+	/* copy mem info */
+	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		gsi_channel_props.ring_base_addr;
+	ep->gsi_mem_info.chan_ring_base_vaddr =
+		gsi_channel_props.ring_base_vaddr;
+
+	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
+		gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
+	else
+		gsi_channel_props.low_weight = 1;
+	gsi_channel_props.chan_user_data = ep->sys;
+	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
+	if (IPA_CLIENT_IS_PROD(ep->client))
+		gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
+	else
+		gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb;
+	if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
+		gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb;
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_alloc_channel;
+
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.gpi.max_outstanding_tre = gsi_ep_info->ipa_if_tlv *
+		GSI_CHAN_RE_SIZE_16B;
+	ch_scratch.gpi.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B;
+	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write scratch %d\n", result);
+		goto fail_start_channel;
+	}
+
+	result = gsi_start_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_start_channel;
+	if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
+		gsi_config_channel_mode(ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_POLL);
+	return 0;
+
+fail_start_channel:
+	if (gsi_dealloc_channel(ep->gsi_chan_hdl)
+		!= GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to dealloc GSI chan.\n");
+		BUG();
+	}
+fail_alloc_channel:
+	if (ep->gsi_evt_ring_hdl != ~0) {
+		gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+		ep->gsi_evt_ring_hdl = ~0;
+	}
+fail_alloc_evt_ring:
+	IPAERR("Return with err: %d\n", result);
+	return result;
+}
+
+static int ipa_populate_tag_field(struct ipa3_desc *desc,
+		struct ipa3_tx_pkt_wrapper *tx_pkt,
+		struct ipahal_imm_cmd_pyld **tag_pyld_ret)
+{
+	struct ipahal_imm_cmd_pyld *tag_pyld;
+	struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0};
+
+	/* populate tag field only if it is NULL */
+	if (desc->pyld == NULL) {
+		tag_cmd.tag = pointer_to_tag_wa(tx_pkt);
+		tag_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true);
+		if (unlikely(!tag_pyld)) {
+			IPAERR("Failed to construct ip_packet_tag_status\n");
+			return -EFAULT;
+		}
+		/*
+		 * This is for 32-bit pointer, will need special
+		 * handling if 64-bit pointer is used
+		 */
+		IPADBG_LOW("tx_pkt sent in tag: 0x%p\n", tx_pkt);
+		desc->pyld = tag_pyld->data;
+		desc->len = tag_pyld->len;
+		desc->user1 = tag_pyld;
+
+		*tag_pyld_ret = tag_pyld;
+	}
+	return 0;
+}
+
+static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
+		struct ipa_mem_buffer *mem_info)
+{
+	int ret;
+	struct gsi_chan_xfer_notify xfer_notify;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+	if (sys->ep->bytes_xfered_valid) {
+		mem_info->phys_base = sys->ep->phys_base;
+		mem_info->size = (u32)sys->ep->bytes_xfered;
+		sys->ep->bytes_xfered_valid = false;
+		return GSI_STATUS_SUCCESS;
+	}
+
+	ret = gsi_poll_channel(sys->ep->gsi_chan_hdl,
+		&xfer_notify);
+	if (ret == GSI_STATUS_POLL_EMPTY)
+		return ret;
+	else if (ret != GSI_STATUS_SUCCESS) {
+		IPAERR("Poll channel err: %d\n", ret);
+		return ret;
+	}
+
+	rx_pkt = (struct ipa3_rx_pkt_wrapper *)
+		xfer_notify.xfer_user_data;
+	mem_info->phys_base = rx_pkt->data.dma_addr;
+	mem_info->size = xfer_notify.bytes_xfered;
+
+	return ret;
+}
+
+static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
+	bool process_all, bool in_poll_state)
+{
+	int ret;
+	int cnt = 0;
+	struct ipa_mem_buffer mem_info = {0};
+
+	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+			!atomic_read(&sys->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+
+		ret = ipa_poll_gsi_pkt(sys, &mem_info);
+		if (ret)
+			break;
+
+		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+			ipa3_dma_memcpy_notify(sys, &mem_info);
+		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+			ipa3_wlan_wq_rx_common(sys, mem_info.size);
+		else
+			ipa3_wq_rx_common(sys, mem_info.size);
+
+		cnt++;
+	}
+	return cnt;
+}
+
+static int ipa_poll_sps_pkt(struct ipa3_sys_context *sys,
+		struct ipa_mem_buffer *mem_info)
+{
+	int ret;
+	struct sps_iovec iov;
+
+	ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+	if (ret) {
+		IPAERR("sps_get_iovec failed %d\n", ret);
+		return ret;
+	}
+
+	if (iov.addr == 0)
+		return -EIO;
+
+	mem_info->phys_base = iov.addr;
+	mem_info->size = iov.size;
+	return 0;
+}
+
+static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
+	bool process_all, bool in_poll_state)
+{
+	int ret;
+	int cnt = 0;
+	struct ipa_mem_buffer mem_info = {0};
+
+	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+			!atomic_read(&sys->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+
+		ret = ipa_poll_sps_pkt(sys, &mem_info);
+		if (ret)
+			break;
+
+		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+			ipa3_dma_memcpy_notify(sys, &mem_info);
+		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+			ipa3_wlan_wq_rx_common(sys, mem_info.size);
+		else
+			ipa3_wq_rx_common(sys, mem_info.size);
+
+		cnt++;
+	}
+
+	return cnt;
+}
+
+/**
+ * ipa3_rx_poll() - Poll the rx packets from IPA HW. This
+ * function is executed in softirq context
+ * @clnt_hdl: client handle
+ * @weight: NAPI poll weight (budget)
+ *
+ * If no packets are polled within the given weight, the driver
+ * schedules a switch back to interrupt mode
+ *
+ * Return: number of polled packets, 0 (zero) on error
+ */
+int ipa3_rx_poll(u32 clnt_hdl, int weight)
+{
+	struct ipa3_ep_context *ep;
+	int ret;
+	int cnt = 0;
+	unsigned int delay = 1;
+	struct ipa_mem_buffer mem_info = {0};
+
+	IPADBG("\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm 0x%x\n", clnt_hdl);
+		return cnt;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	while (cnt < weight &&
+		   atomic_read(&ep->sys->curr_polling_state)) {
+
+		if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+			ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
+		else
+			ret = ipa_poll_sps_pkt(ep->sys, &mem_info);
+
+		if (ret)
+			break;
+
+		ipa3_wq_rx_common(ep->sys, mem_info.size);
+		cnt += 5;
+	}
+
+	if (cnt == 0) {
+		ep->inactive_cycles++;
+		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
+
+		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
+			ep->switch_to_intr = true;
+			delay = 0;
+		}
+		queue_delayed_work(ep->sys->wq,
+			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
+	} else
+		ep->inactive_cycles = 0;
+
+	return cnt;
+}
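+
+/*
+ * Budget note (illustrative): each polled packet consumes a credit of 5
+ * from the NAPI-style weight above, so e.g. weight = 30 allows at most
+ * 6 packets per ipa3_rx_poll() invocation before the loop exits.
+ */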
+
+static unsigned long tag_to_pointer_wa(uint64_t tag)
+{
+	return 0xFFFF000000000000 | (unsigned long) tag;
+}
+
+static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+	u16 temp;
+	/* this check might have a throughput cost */
+	if (ipa3_is_msm_device()) {
+		temp = (u16) (~((unsigned long) tx_pkt &
+			0xFFFF000000000000) >> 48);
+		if (temp) {
+			IPAERR("The 16 prefix is not all 1s (%p)\n",
+			tx_pkt);
+			BUG();
+		}
+	}
+	return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF;
+}
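+
+/*
+ * Tag workaround note (illustrative): the HW tag carries only the low
+ * 48 bits of the tx_pkt pointer; tag_to_pointer_wa() restores the full
+ * pointer by OR-ing back 0xFFFF000000000000, relying on kernel virtual
+ * addresses having all-ones in their top 16 bits - the assumption that
+ * pointer_to_tag_wa() verifies on MSM targets.
+ */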
+
+/**
+ * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20
+ *
+ * A hardware limitation requires avoiding the use of GSI physical channel 20.
+ * This function allocates GSI physical channel 20 and holds it to prevent
+ * others from using it.
+ *
+ * Return codes: 0 on success, negative on failure
+ */
+int ipa_gsi_ch20_wa(void)
+{
+	struct gsi_chan_props gsi_channel_props;
+	dma_addr_t dma_addr;
+	int result;
+	int i;
+	unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC];
+	unsigned long chan_hdl_to_keep;
+
+
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
+	gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+	gsi_channel_props.evt_ring_hdl = ~0;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size;
+	gsi_channel_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+		&dma_addr, 0);
+	gsi_channel_props.ring_base_addr = dma_addr;
+	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.low_weight = 1;
+	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
+	gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
+
+	/* first allocate channels up to channel 20 */
+	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+		gsi_channel_props.ch_id = i;
+		result = gsi_alloc_channel(&gsi_channel_props,
+			ipa3_ctx->gsi_dev_hdl,
+			&chan_hdl[i]);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("failed to alloc channel %d err %d\n",
+				i, result);
+			return result;
+		}
+	}
+
+	/* allocate channel 20 */
+	gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN;
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&chan_hdl_to_keep);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to alloc channel %d err %d\n",
+			IPA_GSI_CH_20_WA_VIRT_CHAN, result);
+		return result;
+	}
+
+	/* release all other channels */
+	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+		result = gsi_dealloc_channel(chan_hdl[i]);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("failed to dealloc channel %d err %d\n",
+				i, result);
+			return result;
+		}
+	}
+
+	/* DMA memory shall not be freed as it is used by channel 20 */
+	return 0;
+}
+
+/**
+ * ipa_adjust_ra_buff_base_sz()
+ *
+ * Return value: the largest power of two which is smaller
+ * than the given aggr_byte_limit once the IPA_MTU and
+ * IPA_GENERIC_RX_BUFF_LIMIT headroom has been added to it
+ */
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
+{
+	aggr_byte_limit += IPA_MTU;
+	aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
+	aggr_byte_limit--;
+	aggr_byte_limit |= aggr_byte_limit >> 1;
+	aggr_byte_limit |= aggr_byte_limit >> 2;
+	aggr_byte_limit |= aggr_byte_limit >> 4;
+	aggr_byte_limit |= aggr_byte_limit >> 8;
+	aggr_byte_limit |= aggr_byte_limit >> 16;
+	aggr_byte_limit++;
+	return aggr_byte_limit >> 1;
+}
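+
+/*
+ * Worked example (illustrative): if aggr_byte_limit plus the IPA_MTU
+ * and IPA_GENERIC_RX_BUFF_LIMIT headroom sums to 10000, the bit-smear
+ * sequence rounds it up to the next power of two (16384) and the final
+ * shift returns 8192, the largest power of two below the adjusted
+ * limit.
+ */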
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
new file mode 100644
index 0000000..e7af53f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -0,0 +1,1592 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND		(-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_MDFY_FAILED		(-1)
+
+#define IPA_FLT_GET_RULE_TYPE(__entry) \
+	( \
+	((__entry)->rule.hashable) ? \
+	(IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \
+	)
+
+/**
+ * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer; buf == NULL means the caller only wants to
+ *		know the size of the rule as seen by HW, so a scratch
+ *		buffer is used instead.
+ *		With this scheme the rule is generated twice: once to
+ *		learn its size using the scratch buffer, and a second
+ *		time to write the rule into the caller-supplied buffer
+ *		of the required size
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
+		struct ipa3_flt_entry *entry, u8 *buf)
+{
+	struct ipahal_flt_rule_gen_params gen_params;
+	int res = 0;
+
+	memset(&gen_params, 0, sizeof(gen_params));
+
+	gen_params.ipt = ip;
+	if (entry->rt_tbl)
+		gen_params.rt_tbl_idx = entry->rt_tbl->idx;
+	else
+		gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
+
+	gen_params.priority = entry->prio;
+	gen_params.id = entry->rule_id;
+	gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;
+
+	res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+	if (res)
+		IPAERR("failed to generate flt h/w rule\n");
+
+	return res;
+}
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
+{
+	struct ipa3_flt_tbl *tbl;
+	int i;
+
+	IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (tbl->prev_mem[rlt].phys_base) {
+			IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
+			ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
+		}
+
+		if (list_empty(&tbl->head_flt_rule_list)) {
+			if (tbl->curr_mem[rlt].phys_base) {
+				IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
+					i);
+				ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
+			}
+		}
+	}
+}
+
+/**
+ * ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit
+ *  assign priorities to the rules, calculate their sizes and calculate
+ *  the overall table size
+ * @ip: the ip address family type
+ * @tbl: the flt tbl to be prepared
+ * @pipe_idx: the ep pipe appropriate for the given tbl
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
+	struct ipa3_flt_tbl *tbl, int pipe_idx)
+{
+	struct ipa3_flt_entry *entry;
+	int prio_i;
+	int max_prio;
+	u32 hdr_width;
+
+	tbl->sz[IPA_RULE_HASHABLE] = 0;
+	tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
+
+	max_prio = ipahal_get_rule_max_priority();
+
+	prio_i = max_prio;
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+
+		if (entry->rule.max_prio) {
+			entry->prio = max_prio;
+		} else {
+			if (ipahal_rule_decrease_priority(&prio_i)) {
+				IPAERR("cannot decrease rule priority - %d\n",
+					prio_i);
+				return -EPERM;
+			}
+			entry->prio = prio_i;
+		}
+
+		if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
+			IPAERR("failed to calculate HW FLT rule size\n");
+			return -EPERM;
+		}
+		IPADBG("pipe %d rule_id (handle) %u hw_len %d priority %u\n",
+			pipe_idx, entry->rule_id, entry->hw_len, entry->prio);
+
+		if (entry->rule.hashable)
+			tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
+		else
+			tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
+	}
+
+	if ((tbl->sz[IPA_RULE_HASHABLE] +
+		tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
+		IPADBG_LOW("flt tbl for pipe %d has zero total size\n",
+			pipe_idx);
+		return 0;
+	}
+
+	hdr_width = ipahal_get_hw_tbl_hdr_width();
+
+	/* for the header word */
+	if (tbl->sz[IPA_RULE_HASHABLE])
+		tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
+	if (tbl->sz[IPA_RULE_NON_HASHABLE])
+		tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
+
+	IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
+		tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
+
+	return 0;
+}
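+
+/*
+ * Accounting example (illustrative, header width is an assumed value):
+ * two hashable rules of hw_len 24 and 32 with an 8-byte table header
+ * give sz[IPA_RULE_HASHABLE] = 24 + 32 + 8 = 64 bytes, while an empty
+ * rule set contributes no header and stays at zero.
+ */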
+
+/**
+ * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
+ *  (rules and tables) to HW format and fill it in the given buffers
+ * @ip: the ip address family type
+ * @rlt: the type of the rules to translate (hashable or non-hashable)
+ * @base: the rules body buffer to be filled
+ * @hdr: the rules header (addresses/offsets) buffer to be filled
+ * @body_ofst: the offset of the rules body from the rules header at
+ *  ipa sram
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst)
+{
+	u64 offset;
+	u8 *body_i;
+	int res;
+	struct ipa3_flt_entry *entry;
+	u8 *tbl_mem_buf;
+	struct ipa_mem_buffer tbl_mem;
+	struct ipa3_flt_tbl *tbl;
+	int i;
+	int hdr_idx = 0;
+
+	body_i = base;
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (tbl->sz[rlt] == 0) {
+			hdr_idx++;
+			continue;
+		}
+		if (tbl->in_sys[rlt]) {
+			/* only body (no header) */
+			tbl_mem.size = tbl->sz[rlt] -
+				ipahal_get_hw_tbl_hdr_width();
+			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+				IPAERR("fail to alloc sys tbl of size %d\n",
+					tbl_mem.size);
+				goto err;
+			}
+
+			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+				hdr, hdr_idx, true)) {
+				IPAERR("fail to wrt sys tbl addr to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			tbl_mem_buf = tbl_mem.base;
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+				link) {
+				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa3_generate_flt_hw_rule(
+					ip, entry, tbl_mem_buf);
+				if (res) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto hdr_update_fail;
+				}
+				tbl_mem_buf += entry->hw_len;
+			}
+
+			if (tbl->curr_mem[rlt].phys_base) {
+				WARN_ON(tbl->prev_mem[rlt].phys_base);
+				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
+			}
+			tbl->curr_mem[rlt] = tbl_mem;
+		} else {
+			offset = body_i - base + body_ofst;
+
+			/* update the hdr at the right index */
+			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+				hdr_idx, true)) {
+				IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+				link) {
+				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa3_generate_flt_hw_rule(
+					ip, entry, body_i);
+				if (res) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto err;
+				}
+				body_i += entry->hw_len;
+			}
+
+			/*
+			 * advance body_i to the next table alignment as
+			 * local tables are ordered back-to-back
+			 */
+			body_i += ipahal_get_lcl_tbl_addr_alignment();
+			body_i = (u8 *)((long)body_i &
+				~ipahal_get_lcl_tbl_addr_alignment());
+		}
+		hdr_idx++;
+	}
+
+	return 0;
+
+hdr_update_fail:
+	ipahal_free_dma_mem(&tbl_mem);
+err:
+	return -EPERM;
+}
+
+/**
+ * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls.
+ *  headers and bodies are created in DMA buffers that will later be
+ *  copied into the local memory (sram)
+ * @ip: the ip address family type
+ * @alloc_params: In and Out parameters for the allocations of the buffers
+ *  4 buffers: hdr and bdy, each hashable and non-hashable
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
+	struct ipahal_fltrt_alloc_imgs_params *alloc_params)
+{
+	u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
+	int rc = 0;
+
+	if (ip == IPA_IP_v4) {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
+			IPA_MEM_PART(v4_flt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) -
+			IPA_MEM_PART(v4_flt_hash_ofst);
+	} else {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) -
+			IPA_MEM_PART(v6_flt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) -
+			IPA_MEM_PART(v6_flt_hash_ofst);
+	}
+
+	if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+		IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip);
+		rc = -ENOMEM;
+		goto allocate_failed;
+	}
+
+	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
+		alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
+		hash_bdy_start_ofst)) {
+		IPAERR("fail to translate hashable flt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
+		alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
+		nhash_bdy_start_ofst)) {
+		IPAERR("fail to translate non-hash flt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+
+	return rc;
+
+translate_fail:
+	if (alloc_params->hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params->hash_hdr);
+	ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+	if (alloc_params->hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->hash_bdy);
+	if (alloc_params->nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_failed:
+	return rc;
+}
+
+/**
+ * ipa_flt_valid_lcl_tbl_size() - validate if the space allocated for flt
+ * tbl bodies at the sram is enough for the commit
+ * @ipt: the ip address family type
+ * @rlt: the rule type (hashable or non-hashable)
+ *
+ * Return: true if enough space available or false in other cases
+ */
+static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
+	enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
+{
+	u16 avail;
+
+	if (!bdy) {
+		IPAERR("Bad parameters, bdy = NULL\n");
+		return false;
+	}
+
+	if (ipt == IPA_IP_v4)
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v4_flt_hash_size) :
+			IPA_MEM_PART(apps_v4_flt_nhash_size);
+	else
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v6_flt_hash_size) :
+			IPA_MEM_PART(apps_v6_flt_nhash_size);
+
+	if (bdy->size <= avail)
+		return true;
+
+	IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
+	       bdy->size, avail, ipt, rlt);
+	return false;
+}
+
+/**
+ * ipa_flt_alloc_cmd_buffers() - allocate the descriptor array and the
+ *  immediate-command payload pointer array used for the flt headers and
+ *  bodies, as well as a slot for the flush immediate command.
+ * @ipt: the ip address family type
+ * @desc: [OUT] descriptor buffer
+ * @cmd: [OUT] imm commands payload pointers buffer
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip,
+	struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
+{
+	u16 entries;
+
+	/* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
+	entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
+
+	*desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
+	if (*desc == NULL) {
+		IPAERR("fail to alloc desc blob ip %d\n", ip);
+		goto fail_desc_alloc;
+	}
+
+	*cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
+	if (*cmd_pyld == NULL) {
+		IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
+		goto fail_cmd_alloc;
+	}
+
+	return 0;
+
+fail_cmd_alloc:
+	kfree(*desc);
+fail_desc_alloc:
+	return -ENOMEM;
+}
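+
+/*
+ * Sizing example (illustrative): with ipa3_ctx->ep_flt_num = 8
+ * filtering-capable pipes, 8 * 2 + 3 = 19 descriptor/payload slots are
+ * allocated - one hash-flush register write, two table-header writes
+ * per pipe and up to two table-body writes.
+ */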
+
+/**
+ * ipa_flt_skip_pipe_config() - should ep flt configuration be skipped?
+ *  Pipes that are pre-configured or owned by the modem are skipped.
+ * @pipe: the EP pipe index
+ *
+ * Return: true if the pipe should be skipped, false otherwise
+ */
+static bool ipa_flt_skip_pipe_config(int pipe)
+{
+	if (ipa_is_modem_pipe(pipe)) {
+		IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
+		return true;
+	}
+
+	if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
+		IPADBG_LOW("skip %d\n", pipe);
+		return true;
+	}
+
+	if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == pipe
+		&& ipa3_ctx->modem_cfg_emb_pipe_flt)) {
+		IPADBG_LOW("skip %d\n", pipe);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * __ipa_commit_flt_v3() - commit flt tables to the hw
+ *  Commit the headers, and the bodies if they are local, with internal
+ *  cache flushing.
+ *  The headers (and local bodies) are first created in DMA buffers and
+ *  then written via immediate commands to the SRAM
+ * @ipt: the ip address family type
+ *
+ * Return: 0 on success, negative on failure
+ */
+int __ipa_commit_flt_v3(enum ipa_ip_type ip)
+{
+	struct ipahal_fltrt_alloc_imgs_params alloc_params;
+	int rc = 0;
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
+	int num_cmd = 0;
+	int i;
+	int hdr_idx;
+	u32 lcl_hash_hdr, lcl_nhash_hdr;
+	u32 lcl_hash_bdy, lcl_nhash_bdy;
+	bool lcl_hash, lcl_nhash;
+	struct ipahal_reg_fltrt_hash_flush flush;
+	struct ipahal_reg_valmask valmask;
+	u32 tbl_hdr_width;
+	struct ipa3_flt_tbl *tbl;
+
+	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
+	memset(&alloc_params, 0, sizeof(alloc_params));
+	alloc_params.ipt = ip;
+	alloc_params.tbls_num = ipa3_ctx->ep_flt_num;
+
+	if (ip == IPA_IP_v4) {
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_flt_hash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_flt_nhash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_flt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_flt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+	} else {
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_flt_hash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_flt_nhash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_flt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_flt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+	}
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
+			rc = -EPERM;
+			goto prep_failed;
+		}
+		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+			tbl->sz[IPA_RULE_HASHABLE]) {
+			alloc_params.num_lcl_hash_tbls++;
+			alloc_params.total_sz_lcl_hash_tbls +=
+				tbl->sz[IPA_RULE_HASHABLE];
+			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+
+		}
+		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+			tbl->sz[IPA_RULE_NON_HASHABLE]) {
+			alloc_params.num_lcl_nhash_tbls++;
+			alloc_params.total_sz_lcl_nhash_tbls +=
+				tbl->sz[IPA_RULE_NON_HASHABLE];
+			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+		}
+	}
+
+	if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
+		IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip);
+		rc = -EFAULT;
+		goto prep_failed;
+	}
+
+	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+		&alloc_params.hash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
+		&alloc_params.nhash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+
+	if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) {
+		rc = -ENOMEM;
+		goto fail_size_valid;
+	}
+
+	/* flushing ipa internal hashable flt rules cache */
+	memset(&flush, 0, sizeof(flush));
+	if (ip == IPA_IP_v4)
+		flush.v4_flt = true;
+	else
+		flush.v6_flt = true;
+	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	cmd_pyld[0] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+	if (!cmd_pyld[0]) {
+		IPAERR("fail construct register_write imm cmd: IP %d\n", ip);
+		rc = -EFAULT;
+		goto fail_reg_write_construct;
+	}
+	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].pyld = cmd_pyld[0]->data;
+	desc[0].len = cmd_pyld[0]->len;
+	desc[0].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	hdr_idx = 0;
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i)) {
+			IPADBG_LOW("skip %d - not filtering pipe\n", i);
+			continue;
+		}
+
+		if (ipa_flt_skip_pipe_config(i)) {
+			hdr_idx++;
+			continue;
+		}
+
+		IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
+			hdr_idx, i);
+
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = tbl_hdr_width;
+		mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
+			hdr_idx * tbl_hdr_width;
+		mem_cmd.local_addr = lcl_nhash_hdr +
+			hdr_idx * tbl_hdr_width;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = tbl_hdr_width;
+		mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
+			hdr_idx * tbl_hdr_width;
+		mem_cmd.local_addr = lcl_hash_hdr +
+			hdr_idx * tbl_hdr_width;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+
+		hdr_idx++;
+	}
+
+	if (lcl_nhash) {
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.nhash_bdy.size;
+		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_nhash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+	}
+	if (lcl_hash) {
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.hash_bdy.size;
+		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_hash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+	}
+
+	if (ipa3_send_cmd(num_cmd, desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+		goto fail_imm_cmd_construct;
+	}
+
+	IPADBG_LOW("Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
+
+	IPADBG_LOW("Non-Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
+
+	if (alloc_params.hash_bdy.size) {
+		IPADBG_LOW("Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+			alloc_params.hash_bdy.phys_base,
+			alloc_params.hash_bdy.size);
+	}
+
+	if (alloc_params.nhash_bdy.size) {
+		IPADBG_LOW("Non-Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+			alloc_params.nhash_bdy.phys_base,
+			alloc_params.nhash_bdy.size);
+	}
+
+	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
+	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);
+
+fail_imm_cmd_construct:
+	for (i = 0 ; i < num_cmd ; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+fail_reg_write_construct:
+	kfree(desc);
+	kfree(cmd_pyld);
+fail_size_valid:
+	if (alloc_params.hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params.hash_hdr);
+	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+	if (alloc_params.hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.hash_bdy);
+	if (alloc_params.nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+prep_failed:
+	return rc;
+}
+
+static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
+		struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
+{
+	if (rule->action != IPA_PASS_TO_EXCEPTION) {
+		if (!rule->eq_attrib_type) {
+			if (!rule->rt_tbl_hdl) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+
+			*rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
+			if (*rt_tbl == NULL) {
+				IPAERR("RT tbl not found\n");
+				goto error;
+			}
+
+			if ((*rt_tbl)->cookie != IPA_COOKIE) {
+				IPAERR("RT table cookie is invalid\n");
+				goto error;
+			}
+		} else {
+			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
+				IPA_MEM_PART(v4_modem_rt_index_hi) :
+				IPA_MEM_PART(v6_modem_rt_index_hi))) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+		}
+	}
+
+	if (rule->rule_id) {
+		if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) {
+			IPAERR("invalid rule_id provided 0x%x\n"
+				"rule_ids without bit 0x%x are auto generated\n",
+				rule->rule_id, ipahal_get_rule_id_hi_bit());
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
+		const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl,
+		struct ipa3_flt_tbl *tbl)
+{
+	int id;
+
+	*entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL);
+	if (!*entry) {
+		IPAERR("failed to alloc FLT rule object\n");
+		goto error;
+	}
+	INIT_LIST_HEAD(&((*entry)->link));
+	(*entry)->rule = *rule;
+	(*entry)->cookie = IPA_COOKIE;
+	(*entry)->rt_tbl = rt_tbl;
+	(*entry)->tbl = tbl;
+	if (rule->rule_id) {
+		id = rule->rule_id;
+	} else {
+		id = ipa3_alloc_rule_id(&tbl->rule_ids);
+		if (id < 0) {
+			IPAERR("failed to allocate rule id\n");
+			WARN_ON(1);
+			goto rule_id_fail;
+		}
+	}
+	(*entry)->rule_id = id;
+
+	return 0;
+
+rule_id_fail:
+	kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry);
+error:
+	return -EPERM;
+}
+
+static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
+		struct ipa3_flt_entry *entry, u32 *rule_hdl)
+{
+	int id;
+
+	tbl->rule_cnt++;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt++;
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+	}
+	*rule_hdl = id;
+	entry->id = id;
+	IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+	return 0;
+}
+
+static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
+			      const struct ipa_flt_rule *rule, u8 add_rear,
+			      u32 *rule_hdl)
+{
+	struct ipa3_flt_entry *entry;
+	struct ipa3_rt_tbl *rt_tbl = NULL;
+
+	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
+		goto error;
+
+	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
+		goto error;
+
+	if (add_rear) {
+		if (tbl->sticky_rear)
+			list_add_tail(&entry->link,
+					tbl->head_flt_rule_list.prev);
+		else
+			list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+	} else {
+		list_add(&entry->link, &tbl->head_flt_rule_list);
+	}
+
+	__ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
+				const struct ipa_flt_rule *rule,
+				u32 *rule_hdl,
+				enum ipa_ip_type ip,
+				struct ipa3_flt_entry **add_after_entry)
+{
+	struct ipa3_flt_entry *entry;
+	struct ipa3_rt_tbl *rt_tbl = NULL;
+
+	if (!*add_after_entry)
+		goto error;
+
+	if (rule == NULL || rule_hdl == NULL) {
+		IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
+				rule_hdl);
+		goto error;
+	}
+
+	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
+		goto error;
+
+	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
+		goto error;
+
+	list_add(&entry->link, &((*add_after_entry)->link));
+
+	__ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+
+	/*
+	 * prepare for next insertion
+	 */
+	*add_after_entry = entry;
+
+	return 0;
+
+error:
+	*add_after_entry = NULL;
+	return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+	struct ipa3_flt_entry *entry;
+	int id;
+
+	entry = ipa3_id_find(rule_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+	id = entry->id;
+
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt--;
+	IPADBG("del flt rule rule_cnt=%d rule_id=%d\n",
+		entry->tbl->rule_cnt, entry->rule_id);
+	entry->cookie = 0;
+	/* if rule id was allocated from idr, remove it */
+	if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+		idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+
+	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(id);
+
+	return 0;
+}
+
+static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
+		enum ipa_ip_type ip)
+{
+	struct ipa3_flt_entry *entry;
+	struct ipa3_rt_tbl *rt_tbl = NULL;
+
+	entry = ipa3_id_find(frule->rule_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		goto error;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		goto error;
+	}
+
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt--;
+
+	if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
+		if (!frule->rule.eq_attrib_type) {
+			if (!frule->rule.rt_tbl_hdl) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+
+			rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
+			if (rt_tbl == NULL) {
+				IPAERR("RT tbl not found\n");
+				goto error;
+			}
+
+			if (rt_tbl->cookie != IPA_COOKIE) {
+				IPAERR("RT table cookie is invalid\n");
+				goto error;
+			}
+		} else {
+			if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
+				IPA_MEM_PART(v4_modem_rt_index_hi) :
+				IPA_MEM_PART(v6_modem_rt_index_hi))) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+		}
+	}
+
+	entry->rule = frule->rule;
+	entry->rt_tbl = rt_tbl;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt++;
+	entry->hw_len = 0;
+	entry->prio = 0;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
+{
+	*ipa_ep_idx = ipa3_get_ep_mapping(ep);
+	if (*ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
+		IPAERR("ep not valid ep=%d\n", ep);
+		return -EINVAL;
+	}
+	if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0)
+		IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx);
+
+	if (!ipa_is_ep_support_flt(*ipa_ep_idx)) {
+		IPAERR("ep do not support filtering ep=%d\n", ep);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+				 const struct ipa_flt_rule *rule, u8 add_rear,
+				 u32 *rule_hdl)
+{
+	struct ipa3_flt_tbl *tbl;
+	int ipa_ep_idx;
+
+	if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
+		IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+				rule_hdl, ep);
+
+		return -EINVAL;
+	}
+
+	if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx))
+		return -EINVAL;
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
+	IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	int i;
+	int result;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (!rules->global)
+			result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		else
+			result = -1;
+
+		if (result) {
+			IPAERR("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->global) {
+		IPAERR("no support for global filter rules\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after
+ *  the rule which its handle is given and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
+{
+	int i;
+	int result;
+	struct ipa3_flt_tbl *tbl;
+	int ipa_ep_idx;
+	struct ipa3_flt_entry *entry;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (rules->ep >= IPA_CLIENT_MAX) {
+		IPAERR("bad parms ep=%d\n", rules->ep);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
+		result = -EINVAL;
+		goto bail;
+	}
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR("given entry does not match the table\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
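+	/* the rear of a sticky table is reserved; do not insert after its last rule */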
+	if (tbl->sticky_rear)
+		if (&entry->link == tbl->head_flt_rule_list.prev) {
+			IPAERR("cannot add rule at end of a sticky table\n");
+			result = -EINVAL;
+			goto bail;
+		}
+
+	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
+			rules->ip, rules->ep, rules->add_after_hdl);
+
+	/*
+	 * Add the rules one after the other. If one insertion fails it cuts
+	 * the chain: all following rules receive a fail status, since the
+	 * subsequent calls to __ipa_add_flt_rule_after see entry == NULL.
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		result = __ipa_add_flt_rule_after(tbl,
+				&rules->rules[i].rule,
+				&rules->rules[i].flt_rule_hdl,
+				rules->ip,
+				&entry);
+
+		if (result) {
+			IPAERR("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			IPAERR("failed to commit flt rules\n");
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del flt rule %i\n", i);
+			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
+			IPAERR("failed to mdfy flt rule %i\n", i);
+			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
+		} else {
+			hdls->rules[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_commit_flt() - Commit the current SW filtering table of specified type
+ * to IPA HW
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_commit_flt(enum ipa_ip_type ip)
+{
+	int result;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_flt(enum ipa_ip_type ip)
+{
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_flt_entry *entry;
+	struct ipa3_flt_entry *next;
+	int i;
+	int id;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+				link) {
+			if (ipa3_id_find(entry->id) == NULL) {
+				WARN_ON(1);
+				mutex_unlock(&ipa3_ctx->lock);
+				return -EFAULT;
+			}
+			list_del(&entry->link);
+			entry->tbl->rule_cnt--;
+			if (entry->rt_tbl)
+				entry->rt_tbl->ref_cnt--;
+			/* if rule id was allocated from idr, remove it */
+			if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+				idr_remove(&entry->tbl->rule_ids,
+					entry->rule_id);
+			entry->cookie = 0;
+			id = entry->id;
+			kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
+			/* remove the handle from the database */
+			ipa3_id_remove(id);
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
+{
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+	struct ipa_flt_rule rule;
+
+	if (!ipa_is_ep_support_flt(ipa_ep_idx)) {
+		IPADBG("cannot add flt rules to non filtering pipe num %d\n",
+			ipa_ep_idx);
+		return;
+	}
+
+	memset(&rule, 0, sizeof(rule));
+
+	mutex_lock(&ipa3_ctx->lock);
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
+	tbl->sticky_rear = true;
+	rule.action = IPA_PASS_TO_EXCEPTION;
+	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, false,
+			&ep->dflt_flt4_rule_hdl);
+	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
+	tbl->sticky_rear = true;
+	rule.action = IPA_PASS_TO_EXCEPTION;
+	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, false,
+			&ep->dflt_flt6_rule_hdl);
+	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+	mutex_unlock(&ipa3_ctx->lock);
+}
+
+void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
+{
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ep->dflt_flt4_rule_hdl) {
+		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
+		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+		ep->dflt_flt4_rule_hdl = 0;
+	}
+	if (ep->dflt_flt6_rule_hdl) {
+		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
+		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+		ep->dflt_flt6_rule_hdl = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+}
+
+/**
+ * ipa3_set_flt_tuple_mask() - Set the flt tuple masking for the given pipe.
+ *  The pipe must be an AP EP (not modem) and must support filtering; this
+ *  updates the filtering masking values without changing the routing ones.
+ *
+ * @pipe_idx: filter pipe index to configure the tuple masking
+ * @tuple: the tuple members masking
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
+{
+	struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
+
+	if (!tuple) {
+		IPAERR("bad tuple\n");
+		return -EINVAL;
+	}
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("bad pipe index!\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_is_ep_support_flt(pipe_idx)) {
+		IPAERR("pipe %d not filtering pipe\n", pipe_idx);
+		return -EINVAL;
+	}
+
+	if (ipa_is_modem_pipe(pipe_idx)) {
+		IPAERR("modem pipe tuple is not configured by AP\n");
+		return -EINVAL;
+	}
+
+	ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		pipe_idx, &fltrt_tuple);
+	fltrt_tuple.flt = *tuple;
+	ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		pipe_idx, &fltrt_tuple);
+
+	return 0;
+}
+
+/**
+ * ipa3_flt_read_tbl_from_hw() -Read filtering table from IPA HW
+ * @pipe_idx: IPA endpoint index
+ * @ip_type: IPv4 or IPv6 table
+ * @hashable: hashable or non-hashable table
+ * @entry: array to fill the table entries
+ * @num_entry: number of entries in the entry array. Set by the caller to the
+ *  array size; on return, set by this function to the number of entries
+ *  actually filled in the array
+ *
+ * This function reads the filtering table from IPA SRAM and prepares an array
+ * of entries. This function is mainly used for debugging purposes.
+ *
+ * For an empty table, or a modem table residing in system memory, zero
+ * entries are returned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
+	bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
+{
+	void *ipa_sram_mmio;
+	u64 hdr_base_ofst;
+	int tbl_entry_idx;
+	int i;
+	int res = 0;
+	u64 tbl_addr;
+	bool is_sys;
+	u8 *rule_addr;
+	struct ipa_mem_buffer *sys_tbl_mem;
+	int rule_idx;
+
+	IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+		pipe_idx, ip_type, hashable, entry, num_entry);
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX ||
+	    !entry || !num_entry) {
+		IPAERR("Invalid params\n");
+		return -EFAULT;
+	}
+
+	if (!ipa_is_ep_support_flt(pipe_idx)) {
+		IPAERR("pipe %d does not support filtering\n", pipe_idx);
+		return -EINVAL;
+	}
+
+	/* map IPA SRAM */
+	ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+			ipa3_ctx->smem_restricted_bytes / 4),
+		ipa3_ctx->smem_sz);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	memset(entry, 0, sizeof(*entry) * (*num_entry));
+	if (hashable) {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_flt_hash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_flt_hash_ofst);
+	} else {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_flt_nhash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_flt_nhash_ofst);
+	}
+
+	/* calculate the index of the tbl entry */
+	tbl_entry_idx = 1; /* skip the bitmap */
+	for (i = 0; i < pipe_idx; i++)
+		if (ipa3_ctx->ep_flt_bitmap & (1 << i))
+			tbl_entry_idx++;
+
+	IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
+		hdr_base_ofst, tbl_entry_idx);
+
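+	/* the table header entry holds either an SRAM-relative offset or a system memory address */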
+	res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+		tbl_entry_idx, &tbl_addr, &is_sys);
+	if (res) {
+		IPAERR("failed to read table address from header structure\n");
+		goto bail;
+	}
+	IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
+		pipe_idx, tbl_addr, is_sys);
+	if (!tbl_addr) {
+		IPAERR("invalid flt tbl addr\n");
+		res = -EFAULT;
+		goto bail;
+	}
+
+	/* for tables residing in DDR, access them through the virtual mapping */
+	if (is_sys) {
+		sys_tbl_mem = &ipa3_ctx->flt_tbl[pipe_idx][ip_type].
+			curr_mem[hashable ? IPA_RULE_HASHABLE :
+				IPA_RULE_NON_HASHABLE];
+		if (sys_tbl_mem->phys_base &&
+			sys_tbl_mem->phys_base != tbl_addr) {
+			IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
+				tbl_addr, &sys_tbl_mem->phys_base);
+		}
+		if (sys_tbl_mem->phys_base)
+			rule_addr = sys_tbl_mem->base;
+		else
+			rule_addr = NULL;
+	} else {
+		rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
+	}
+
+	IPADBG("First rule addr 0x%p\n", rule_addr);
+
+	if (!rule_addr) {
+		/* Modem table in system memory or empty table */
+		*num_entry = 0;
+		goto bail;
+	}
+
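+	/* walk the rules until a terminator (rule_size == 0) or the array is full */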
+	rule_idx = 0;
+	while (rule_idx < *num_entry) {
+		res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+		if (res) {
+			IPAERR("failed parsing flt rule\n");
+			goto bail;
+		}
+
+		IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+		if (!entry[rule_idx].rule_size)
+			break;
+
+		rule_addr += entry[rule_idx].rule_size;
+		rule_idx++;
+	}
+	*num_entry = rule_idx;
+bail:
+	iounmap(ipa_sram_mmio);
+	return res;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
new file mode 100644
index 0000000..da52b26
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -0,0 +1,1173 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
+static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
+
+#define HDR_TYPE_IS_VALID(type) \
+	((type) >= 0 && (type) < IPA_HDR_L2_MAX)
+
+#define HDR_PROC_TYPE_IS_VALID(type) \
+	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
+
+/**
+ * ipa3_generate_hdr_hw_tbl() - generates the headers table
+ * @mem:	[out] buffer to put the header table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+	struct ipa3_hdr_entry *entry;
+
+	mem->size = ipa3_ctx->hdr_tbl.end;
+
+	if (mem->size == 0) {
+		IPAERR("hdr tbl empty\n");
+		return -EPERM;
+	}
+	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
+
+	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	memset(mem->base, 0, mem->size);
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (entry->is_hdr_proc_ctx)
+			continue;
+		IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
+				entry->offset_entry->offset);
+		ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
+				entry->hdr, entry->hdr_len);
+	}
+
+	return 0;
+}
+
+static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
+	u32 hdr_base_addr)
+{
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	int ret;
+
+	list_for_each_entry(entry,
+			&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+			link) {
+		IPADBG_LOW("processing type %d ofst=%d\n",
+			entry->type, entry->offset_entry->offset);
+		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
+				entry->offset_entry->offset,
+				entry->hdr->hdr_len,
+				entry->hdr->is_hdr_proc_ctx,
+				entry->hdr->phys_base,
+				hdr_base_addr,
+				entry->hdr->offset_entry);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_generate_hdr_proc_ctx_hw_tbl() -
+ * generates the headers processing context table.
+ * @hdr_sys_addr:	[in] system memory address of the header table
+ * @mem:		[out] buffer to put the processing context table
+ * @aligned_mem:	[out] actual processing context table (with alignment).
+ *			Processing context table needs to be 8 bytes aligned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
+	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
+{
+	u32 hdr_base_addr;
+
+	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
+
+	/* make sure table is aligned */
+	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+
+	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
+
+	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	aligned_mem->phys_base =
+		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
+	aligned_mem->base = mem->base +
+		(aligned_mem->phys_base - mem->phys_base);
+	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+	memset(aligned_mem->base, 0, aligned_mem->size);
+	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
+		hdr_sys_addr;
+	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
+}
+
+/**
+ * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa_commit_hdr_v3_0(void)
+{
+	struct ipa3_desc desc[2];
+	struct ipa_mem_buffer hdr_mem;
+	struct ipa_mem_buffer ctx_mem;
+	struct ipa_mem_buffer aligned_ctx_mem;
+	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
+	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
+	struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
+	struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
+	int rc = -EFAULT;
+	u32 proc_ctx_size;
+	u32 proc_ctx_ofst;
+	u32 proc_ctx_size_ddr;
+
+	memset(desc, 0, 2 * sizeof(struct ipa3_desc));
+
+	if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
+		IPAERR("fail to generate HDR HW TBL\n");
+		goto end;
+	}
+
+	if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
+	    &aligned_ctx_mem)) {
+		IPAERR("fail to generate HDR PROC CTX HW TBL\n");
+		goto end;
+	}
+
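+	/*
+	 * desc[0] commits the header table: DMA it into local SRAM, or pass
+	 * the system memory table base to HW via HDR_INIT_SYSTEM.
+	 */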
+	if (ipa3_ctx->hdr_tbl_lcl) {
+		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
+			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+				IPA_MEM_PART(apps_hdr_size));
+			goto end;
+		} else {
+			dma_cmd_hdr.is_read = false; /* write operation */
+			dma_cmd_hdr.skip_pipeline_clear = false;
+			dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			dma_cmd_hdr.system_addr = hdr_mem.phys_base;
+			dma_cmd_hdr.size = hdr_mem.size;
+			dma_cmd_hdr.local_addr =
+				ipa3_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(apps_hdr_ofst);
+			hdr_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM,
+				&dma_cmd_hdr, false);
+			if (!hdr_cmd_pyld) {
+				IPAERR("fail construct dma_shared_mem cmd\n");
+				goto end;
+			}
+			desc[0].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_DMA_SHARED_MEM);
+			desc[0].pyld = hdr_cmd_pyld->data;
+			desc[0].len = hdr_cmd_pyld->len;
+		}
+	} else {
+		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+				IPA_MEM_PART(apps_hdr_size_ddr));
+			goto end;
+		} else {
+			hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
+			hdr_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_HDR_INIT_SYSTEM,
+				&hdr_init_cmd, false);
+			if (!hdr_cmd_pyld) {
+				IPAERR("fail construct hdr_init_system cmd\n");
+				goto end;
+			}
+			desc[0].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_HDR_INIT_SYSTEM);
+			desc[0].pyld = hdr_cmd_pyld->data;
+			desc[0].len = hdr_cmd_pyld->len;
+		}
+	}
+	desc[0].type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+
+	proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
+	proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
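+	/*
+	 * desc[1] commits the proc ctx table: DMA it into local SRAM, or
+	 * program its DDR base via a register write to PKT_PROC_CNTXT_BASE.
+	 */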
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+		if (aligned_ctx_mem.size > proc_ctx_size) {
+			IPAERR("tbl too big needed %d avail %d\n",
+				aligned_ctx_mem.size,
+				proc_ctx_size);
+			goto end;
+		} else {
+			dma_cmd_ctx.is_read = false; /* Write operation */
+			dma_cmd_ctx.skip_pipeline_clear = false;
+			dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
+			dma_cmd_ctx.size = aligned_ctx_mem.size;
+			dma_cmd_ctx.local_addr =
+				ipa3_ctx->smem_restricted_bytes +
+				proc_ctx_ofst;
+			ctx_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM,
+				&dma_cmd_ctx, false);
+			if (!ctx_cmd_pyld) {
+				IPAERR("fail construct dma_shared_mem cmd\n");
+				goto end;
+			}
+			desc[1].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_DMA_SHARED_MEM);
+			desc[1].pyld = ctx_cmd_pyld->data;
+			desc[1].len = ctx_cmd_pyld->len;
+		}
+	} else {
+		proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+		if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
+			IPAERR("tbl too big, needed %d avail %d\n",
+				aligned_ctx_mem.size,
+				proc_ctx_size_ddr);
+			goto end;
+		} else {
+			reg_write_cmd.skip_pipeline_clear = false;
+			reg_write_cmd.pipeline_clear_options =
+				IPAHAL_HPS_CLEAR;
+			reg_write_cmd.offset =
+				ipahal_get_reg_ofst(
+				IPA_SYS_PKT_PROC_CNTXT_BASE);
+			reg_write_cmd.value = aligned_ctx_mem.phys_base;
+			reg_write_cmd.value_mask =
+				~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
+			ctx_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE,
+				&reg_write_cmd, false);
+			if (!ctx_cmd_pyld) {
+				IPAERR("fail construct register_write cmd\n");
+				goto end;
+			}
+			desc[1].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_REGISTER_WRITE);
+			desc[1].pyld = ctx_cmd_pyld->data;
+			desc[1].len = ctx_cmd_pyld->len;
+		}
+	}
+	desc[1].type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
+
+	if (ipa3_send_cmd(2, desc))
+		IPAERR("fail to send immediate command\n");
+	else
+		rc = 0;
+
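+	/*
+	 * local (SRAM) copies can be freed now; for DDR-resident tables keep
+	 * the new buffer and free the previously committed one.
+	 */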
+	if (ipa3_ctx->hdr_tbl_lcl) {
+		dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
+			hdr_mem.phys_base);
+	} else {
+		if (!rc) {
+			if (ipa3_ctx->hdr_mem.phys_base)
+				dma_free_coherent(ipa3_ctx->pdev,
+				ipa3_ctx->hdr_mem.size,
+				ipa3_ctx->hdr_mem.base,
+				ipa3_ctx->hdr_mem.phys_base);
+			ipa3_ctx->hdr_mem = hdr_mem;
+		}
+	}
+
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+		dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
+			ctx_mem.phys_base);
+	} else {
+		if (!rc) {
+			if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
+				dma_free_coherent(ipa3_ctx->pdev,
+					ipa3_ctx->hdr_proc_ctx_mem.size,
+					ipa3_ctx->hdr_proc_ctx_mem.base,
+					ipa3_ctx->hdr_proc_ctx_mem.phys_base);
+			ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
+		}
+	}
+
+end:
+	if (ctx_cmd_pyld)
+		ipahal_destroy_imm_cmd(ctx_cmd_pyld);
+
+	if (hdr_cmd_pyld)
+		ipahal_destroy_imm_cmd(hdr_cmd_pyld);
+
+	return rc;
+}
+
+static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
+	bool add_ref_hdr)
+{
+	struct ipa3_hdr_entry *hdr_entry;
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	struct ipa3_hdr_proc_ctx_offset_entry *offset;
+	u32 bin;
+	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+	int id;
+	int needed_len;
+	int mem_size;
+
+	IPADBG_LOW("processing type %d hdr_hdl %d\n",
+		proc_ctx->type, proc_ctx->hdr_hdl);
+
+	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
+		IPAERR("invalid processing type %d\n", proc_ctx->type);
+		return -EINVAL;
+	}
+
+	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
+	if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
+		IPAERR("hdr_hdl is invalid\n");
+		return -EINVAL;
+	}
+
+	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc proc_ctx object\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	entry->type = proc_ctx->type;
+	entry->hdr = hdr_entry;
+	if (add_ref_hdr)
+		hdr_entry->ref_cnt++;
+	entry->cookie = IPA_COOKIE;
+
+	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
+
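+	/* pick the smallest fixed-size bin (32 or 64 bytes) that fits the ctx */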
+	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
+		bin = IPA_HDR_PROC_CTX_BIN0;
+	} else if (needed_len <=
+			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
+		bin = IPA_HDR_PROC_CTX_BIN1;
+	} else {
+		IPAERR("unexpected needed len %d\n", needed_len);
+		WARN_ON(1);
+		goto bad_len;
+	}
+
+	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
+		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
+		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+	if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
+		IPAERR("hdr proc ctx table overflow\n");
+		goto bad_len;
+	}
+
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
+					   GFP_KERNEL);
+		if (!offset) {
+			IPAERR("failed to alloc offset object\n");
+			goto bad_len;
+		}
+		INIT_LIST_HEAD(&offset->link);
+		/*
+		 * for a first item grow, set the bin and offset which are set
+		 * in stone
+		 */
+		offset->offset = htbl->end;
+		offset->bin = bin;
+		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
+		list_add(&offset->link,
+				&htbl->head_offset_list[bin]);
+	} else {
+		/* get the first free slot */
+		offset =
+		    list_first_entry(&htbl->head_free_offset_list[bin],
+				struct ipa3_hdr_proc_ctx_offset_entry, link);
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
+	}
+
+	entry->offset_entry = offset;
+	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
+	htbl->proc_ctx_cnt++;
+	IPADBG_LOW("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
+			htbl->proc_ctx_cnt, offset->offset);
+
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to alloc id\n");
+		WARN_ON(1);
+	}
+	entry->id = id;
+	proc_ctx->proc_ctx_hdl = id;
+	entry->ref_cnt++;
+
+	return 0;
+
+bad_len:
+	if (add_ref_hdr)
+		hdr_entry->ref_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
+	return -EPERM;
+}
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+	struct ipa3_hdr_entry *entry;
+	struct ipa_hdr_offset_entry *offset;
+	u32 bin;
+	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+	int id;
+	int mem_size;
+
+	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
+		IPAERR("bad parm\n");
+		goto error;
+	}
+
+	if (!HDR_TYPE_IS_VALID(hdr->type)) {
+		IPAERR("invalid hdr type %d\n", hdr->type);
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc hdr object\n");
+		goto error;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+	entry->hdr_len = hdr->hdr_len;
+	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+	entry->is_partial = hdr->is_partial;
+	entry->type = hdr->type;
+	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
+	entry->eth2_ofst = hdr->eth2_ofst;
+	entry->cookie = IPA_COOKIE;
+
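+	/* pick the smallest bin (8/16/24/36/60 bytes) that fits the header */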
+	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+		bin = IPA_HDR_BIN0;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+		bin = IPA_HDR_BIN1;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+		bin = IPA_HDR_BIN2;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+		bin = IPA_HDR_BIN3;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
+		bin = IPA_HDR_BIN4;
+	else {
+		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+		goto bad_hdr_len;
+	}
+
+	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
+		IPA_MEM_PART(apps_hdr_size_ddr);
+
+	/* if the header does not fit in the table, place it in DDR */
+	if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
+		entry->is_hdr_proc_ctx = true;
+		entry->phys_base = dma_map_single(ipa3_ctx->pdev,
+			entry->hdr,
+			entry->hdr_len,
+			DMA_TO_DEVICE);
+	} else {
+		entry->is_hdr_proc_ctx = false;
+		if (list_empty(&htbl->head_free_offset_list[bin])) {
+			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
+						   GFP_KERNEL);
+			if (!offset) {
+				IPAERR("failed to alloc hdr offset object\n");
+				goto bad_hdr_len;
+			}
+			INIT_LIST_HEAD(&offset->link);
+			/*
+			 * for a first item grow, set the bin and offset which
+			 * are set in stone
+			 */
+			offset->offset = htbl->end;
+			offset->bin = bin;
+			htbl->end += ipa_hdr_bin_sz[bin];
+			list_add(&offset->link,
+					&htbl->head_offset_list[bin]);
+		} else {
+			/* get the first free slot */
+			offset =
+			list_first_entry(&htbl->head_free_offset_list[bin],
+					struct ipa_hdr_offset_entry, link);
+			list_move(&offset->link, &htbl->head_offset_list[bin]);
+		}
+
+		entry->offset_entry = offset;
+	}
+
+	list_add(&entry->link, &htbl->head_hdr_entry_list);
+	htbl->hdr_cnt++;
+	if (entry->is_hdr_proc_ctx)
+		IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+			hdr->hdr_len,
+			htbl->hdr_cnt,
+			&entry->phys_base);
+	else
+		IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
+			hdr->hdr_len,
+			htbl->hdr_cnt,
+			entry->offset_entry->offset);
+
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to alloc id\n");
+		WARN_ON(1);
+	}
+	entry->id = id;
+	hdr->hdr_hdl = id;
+	entry->ref_cnt++;
+
+	if (entry->is_hdr_proc_ctx) {
+		struct ipa_hdr_proc_ctx_add proc_ctx;
+
+		IPADBG("adding processing context for header %s\n", hdr->name);
+		proc_ctx.type = IPA_HDR_PROC_NONE;
+		proc_ctx.hdr_hdl = id;
+		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
+			IPAERR("failed to add hdr proc ctx\n");
+			goto fail_add_proc_ctx;
+		}
+		entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
+	}
+
+	return 0;
+
+fail_add_proc_ctx:
+	entry->ref_cnt--;
+	hdr->hdr_hdl = 0;
+	ipa3_id_remove(id);
+	htbl->hdr_cnt--;
+	list_del(&entry->link);
+	dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
+			entry->hdr_len, DMA_TO_DEVICE);
+bad_hdr_len:
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+error:
+	return -EPERM;
+}
+
+static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
+{
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+
+	entry = ipa3_id_find(proc_ctx_hdl);
+	if (!entry || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	IPADBG("del ctx proc cnt=%d ofst=%d\n",
+		htbl->proc_ctx_cnt, entry->offset_entry->offset);
+
+	if (--entry->ref_cnt) {
+		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
+			proc_ctx_hdl, entry->ref_cnt);
+		return 0;
+	}
+
+	if (release_hdr)
+		__ipa3_del_hdr(entry->hdr->id);
+
+	/* move the offset entry to appropriate free list */
+	list_move(&entry->offset_entry->link,
+		&htbl->head_free_offset_list[entry->offset_entry->bin]);
+	list_del(&entry->link);
+	htbl->proc_ctx_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(proc_ctx_hdl);
+
+	return 0;
+}
+
+int __ipa3_del_hdr(u32 hdr_hdl)
+{
+	struct ipa3_hdr_entry *entry;
+	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+
+	entry = ipa3_id_find(hdr_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (!entry || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (entry->is_hdr_proc_ctx)
+		IPADBG("del hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
+	else
+		IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
+			htbl->hdr_cnt, entry->offset_entry->offset);
+
+	if (--entry->ref_cnt) {
+		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
+		return 0;
+	}
+
+	if (entry->is_hdr_proc_ctx) {
+		dma_unmap_single(ipa3_ctx->pdev,
+			entry->phys_base,
+			entry->hdr_len,
+			DMA_TO_DEVICE);
+		__ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false);
+	} else {
+		/* move the offset entry to appropriate free list */
+		list_move(&entry->offset_entry->link,
+			&htbl->head_free_offset_list[entry->offset_entry->bin]);
+	}
+	list_del(&entry->link);
+	htbl->hdr_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(hdr_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
+ * to IPA HW
+ * @hdrs:	[inout] set of headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdrs == NULL || hdrs->num_hdrs == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("adding %d headers to IPA driver internal data struct\n",
+			hdrs->num_hdrs);
+	for (i = 0; i < hdrs->num_hdrs; i++) {
+		if (__ipa_add_hdr(&hdrs->hdr[i])) {
+			IPAERR("failed to add hdr %d\n", i);
+			hdrs->hdr[i].status = -1;
+		} else {
+			hdrs->hdr[i].status = 0;
+		}
+	}
+
+	if (hdrs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_del_hdr() - Remove the specified headers from SW and optionally commit
+ * them to IPA HW
+ * @hdls:	[inout] set of headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa3_del_hdr(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del hdr %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @proc_ctxs:	[inout] set of processing context headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("adding %d header processing contexts to IPA driver\n",
+			proc_ctxs->num_proc_ctxs);
+	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
+		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
+			IPAERR("failed to add hdr proc ctx %d\n", i);
+			proc_ctxs->proc_ctx[i].status = -1;
+		} else {
+			proc_ctxs->proc_ctx[i].status = 0;
+		}
+	}
+
+	if (proc_ctxs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls:	[inout] set of processing context headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true)) {
+			IPAERR("failed to del hdr proc ctx %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_commit_hdr(void)
+{
+	int result = -EFAULT;
+
+	/*
+	 * issue a commit on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa3_commit_rt(IPA_IP_v4))
+		return -EPERM;
+	if (ipa3_commit_rt(IPA_IP_v6))
+		return -EPERM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_hdr(void)
+{
+	struct ipa3_hdr_entry *entry;
+	struct ipa3_hdr_entry *next;
+	struct ipa3_hdr_proc_ctx_entry *ctx_entry;
+	struct ipa3_hdr_proc_ctx_entry *ctx_next;
+	struct ipa_hdr_offset_entry *off_entry;
+	struct ipa_hdr_offset_entry *off_next;
+	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
+	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
+	int i;
+
+	/*
+	 * issue a reset on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa3_reset_rt(IPA_IP_v4))
+		IPAERR("fail to reset v4 rt\n");
+	if (ipa3_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v6 rt\n");
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("reset hdr\n");
+	list_for_each_entry_safe(entry, next,
+			&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+		/* do not remove the default header */
+		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+			if (entry->is_hdr_proc_ctx) {
+				IPAERR("default header is proc ctx\n");
+				mutex_unlock(&ipa3_ctx->lock);
+				WARN_ON(1);
+				return -EFAULT;
+			}
+			continue;
+		}
+
+		if (ipa3_id_find(entry->id) == NULL) {
+			mutex_unlock(&ipa3_ctx->lock);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		if (entry->is_hdr_proc_ctx) {
+			dma_unmap_single(ipa3_ctx->pdev,
+				entry->phys_base,
+				entry->hdr_len,
+				DMA_TO_DEVICE);
+			entry->proc_ctx = NULL;
+		}
+		list_del(&entry->link);
+		entry->ref_cnt = 0;
+		entry->cookie = 0;
+
+		/* remove the handle from the database */
+		ipa3_id_remove(entry->id);
+		kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+
+	}
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		list_for_each_entry_safe(off_entry, off_next,
+					 &ipa3_ctx->hdr_tbl.head_offset_list[i],
+					 link) {
+
+			/*
+			 * do not remove the default exception header which is
+			 * at offset 0
+			 */
+			if (off_entry->offset == 0)
+				continue;
+
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
+		}
+		list_for_each_entry_safe(off_entry, off_next,
+				&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
+				link) {
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
+		}
+	}
+	/* only the default header of size 8 remains */
+	ipa3_ctx->hdr_tbl.end = 8;
+	ipa3_ctx->hdr_tbl.hdr_cnt = 1;
+
+	IPADBG("reset hdr proc ctx\n");
+	list_for_each_entry_safe(
+		ctx_entry,
+		ctx_next,
+		&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+		link) {
+
+		if (ipa3_id_find(ctx_entry->id) == NULL) {
+			mutex_unlock(&ipa3_ctx->lock);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		list_del(&ctx_entry->link);
+		ctx_entry->ref_cnt = 0;
+		ctx_entry->cookie = 0;
+
+		/* remove the handle from the database */
+		ipa3_id_remove(ctx_entry->id);
+		kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);
+
+	}
+	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+				&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
+				link) {
+
+			list_del(&ctx_off_entry->link);
+			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
+					ctx_off_entry);
+		}
+		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
+			link) {
+			list_del(&ctx_off_entry->link);
+			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
+				ctx_off_entry);
+		}
+	}
+	ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
+	ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
+{
+	struct ipa3_hdr_entry *entry;
+
+	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Header name too long: %s\n", name);
+		return NULL;
+	}
+
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (!strcmp(name, entry->name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa3_get_hdr() - Lookup the specified header resource
+ * @lookup:	[inout] header to lookup and its handle
+ *
+ * Look up the specified header resource and return its handle if it exists.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *		Caller should call ipa3_put_hdr later if this function succeeds
+ */
+int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -1;
+
+	if (lookup == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	entry = __ipa_find_hdr(lookup->name);
+	if (entry) {
+		lookup->hdl = entry->id;
+		result = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * __ipa3_release_hdr() - drop reference to header and cause
+ * deletion if reference count permits
+ * @hdr_hdl:	[in] handle of header to be released
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa3_release_hdr(u32 hdr_hdl)
+{
+	int result = 0;
+
+	if (__ipa3_del_hdr(hdr_hdl)) {
+		IPADBG("fail to del hdr %x\n", hdr_hdl);
+		result = -EFAULT;
+		goto bail;
+	}
+
+	/* commit for put */
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		result = -EFAULT;
+		goto bail;
+	}
+
+bail:
+	return result;
+}
+
+/**
+ * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
+ *  and cause deletion if reference count permits
+ * @proc_ctx_hdl:	[in] handle of processing context to be released
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
+{
+	int result = 0;
+
+	if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true)) {
+		IPADBG("fail to del hdr proc ctx %x\n", proc_ctx_hdl);
+		result = -EFAULT;
+		goto bail;
+	}
+
+	/* commit for put */
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		result = -EFAULT;
+		goto bail;
+	}
+
+bail:
+	return result;
+}
+
+/**
+ * ipa3_put_hdr() - Release the specified header handle
+ * @hdr_hdl:	[in] the header handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_put_hdr(u32 hdr_hdl)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -EFAULT;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	entry = ipa3_id_find(hdr_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("invalid header entry\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
+ * it
+ * @copy:	[inout] header to lookup and its copy
+ *
+ * Look up the specified header resource and return a copy of it (along with
+ * its attributes) if it exists; this is typically called for partial headers.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -EFAULT;
+
+	if (copy == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	entry = __ipa_find_hdr(copy->name);
+	if (entry) {
+		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+		copy->hdr_len = entry->hdr_len;
+		copy->type = entry->type;
+		copy->is_partial = entry->is_partial;
+		copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
+		copy->eth2_ofst = entry->eth2_ofst;
+		result = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
new file mode 100644
index 0000000..dff3a3f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW related data types */
+
+
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP		BIT(7)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT		BIT(6)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT	BIT(5)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG		BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED	BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL		BIT(2)
+
+/**
+ * struct ipa3_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags
+ * @metadata: metadata
+ *
+ * A5 MUX header is in BE, A5 runs in LE. This struct definition
+ * allows A5 SW to correctly parse the header
+ */
+struct ipa3_a5_mux_hdr {
+	u16 interface_id;
+	u8 src_pipe_index;
+	u8 flags;
+	u32 metadata;
+};
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
new file mode 100644
index 0000000..4cb4d5a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -0,0 +1,2022 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA3_I_H_
+#define _IPA3_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/ipa.h>
+#include <linux/ipa_usb.h>
+#include <linux/msm-sps.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include "ipa_hw_defs.h"
+#include "ipa_qmi_service.h"
+#include "../ipa_api.h"
+#include "ipahal/ipahal_reg.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
+
+#define DRV_NAME "ipa"
+#define NAT_DEV_NAME "ipaNatTable"
+#define IPA_COOKIE 0x57831603
+#define MTU_BYTE 1500
+
+#define IPA3_MAX_NUM_PIPES 31
+#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
+#define IPA_SYS_DESC_FIFO_SZ 0x800
+#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_LAN_RX_HEADER_LENGTH (2)
+#define IPA_QMAP_HEADER_LENGTH (4)
+#define IPA_DL_CHECKSUM_LENGTH (8)
+#define IPA_NUM_DESC_PER_SW_TX (3)
+#define IPA_GENERIC_RX_POOL_SZ 192
+#define IPA_UC_FINISH_MAX 6
+#define IPA_UC_WAIT_MIN_SLEEP 1000
+#define IPA_UC_WAII_MAX_SLEEP 1200
+
+#define IPA_MAX_STATUS_STAT_NUM 30
+
+#define IPA_IPC_LOG_PAGES 50
+
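+/* IPADBG/IPAERR log to both IPC buffers; IPADBG_LOW only to the low-priority one */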
+#define IPADBG(fmt, args...) \
+	do { \
+		pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define IPADBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa3_ctx) \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAERR(fmt, args...) \
+	do { \
+		pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define WLAN_AMPDU_TX_EP 15
+#define WLAN_PROD_TX_EP  19
+#define WLAN1_CONS_RX_EP  14
+#define WLAN2_CONS_RX_EP  16
+#define WLAN3_CONS_RX_EP  17
+#define WLAN4_CONS_RX_EP  18
+
+#define IPA_RAM_NAT_OFST    0
+#define IPA_RAM_NAT_SIZE    0
+#define IPA_MEM_CANARY_VAL 0xdeadbeef
+
+#define IPA_STATS
+
+#ifdef IPA_STATS
+#define IPA_STATS_INC_CNT(val) (++val)
+#define IPA_STATS_DEC_CNT(val) (--val)
+#define IPA_STATS_EXCP_CNT(__excp, __base) do {				\
+	if (__excp < 0 || __excp >= IPAHAL_PKT_STATUS_EXCEPTION_MAX)	\
+		break;							\
+	++__base[__excp];						\
+	} while (0)
+#else
+#define IPA_STATS_INC_CNT(x) do { } while (0)
+#define IPA_STATS_DEC_CNT(x)
+#define IPA_STATS_EXCP_CNT(__excp, __base) do { } while (0)
+#endif
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN4 4
+#define IPA_HDR_BIN_MAX 5
+
+#define IPA_HDR_PROC_CTX_BIN0 0
+#define IPA_HDR_PROC_CTX_BIN1 1
+#define IPA_HDR_PROC_CTX_BIN_MAX 2
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+/*
+ * Due to a ZLT issue with the USB 3.0 core, the IPA BAM threshold needs to be
+ * set to max packet size + 1. After setting the threshold, the USB core
+ * will not be notified on ZLTs.
+ */
+#define IPA_USB_EVENT_THRESHOLD 0x4001
+
+#define IPA_RX_POOL_CEIL 32
+#define IPA_RX_SKB_SIZE 1792
+
+#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr"
+#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
+#define IPA_INVALID_L4_PROTOCOL 0xFF
+
+#define IPA_CLIENT_IS_PROD(x) ((x) >= IPA_CLIENT_PROD && (x) < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) ((x) >= IPA_CLIENT_CONS && (x) < IPA_CLIENT_MAX)
+
+#define IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst) \
+	(((start_ofst) + 127) & ~127)
+
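+/* proc ctx table must be 8-byte aligned; the macro rounds an offset up to that boundary */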
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
+	(((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \
+	~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1))
+
+#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
+#define IPA_MEM_PART(x_) (ipa3_ctx->ctrl->mem_partition.x_)
+
+#define IPA_GSI_CHANNEL_STOP_MAX_RETRY 10
+#define IPA_GSI_CHANNEL_STOP_PKT_SIZE 1
+
+#define IPA_GSI_CHANNEL_EMPTY_MAX_RETRY 15
+#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC (1000)
+#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC (2000)
+
+#define IPA_SLEEP_CLK_RATE_KHZ (32)
+
+#define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
+#define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96
+#define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
+#define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40
+
+struct ipa3_active_client_htable_entry {
+	struct hlist_node list;
+	char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+	int count;
+	enum ipa_active_client_log_type type;
+};
+
+struct ipa3_active_clients_log_ctx {
+	char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
+	int log_head;
+	int log_tail;
+	bool log_rdy;
+	struct hlist_head htable[IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
+};
+
+struct ipa3_client_names {
+	enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS];
+	int length;
+};
+
+struct ipa_smmu_cb_ctx {
+	bool valid;
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	struct iommu_domain *iommu;
+	unsigned long next_addr;
+	u32 va_start;
+	u32 va_size;
+	u32 va_end;
+};
+
+/**
+ * struct ipa3_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ * @id: rule handle - globally unique
+ * @prio: rule 10bit priority which defines the order of the rule
+ *  among other rules in the same integrated table
+ * @rule_id: rule 10bit ID to be returned in packet status
+ */
+struct ipa3_flt_entry {
+	struct list_head link;
+	struct ipa_flt_rule rule;
+	u32 cookie;
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_rt_tbl *rt_tbl;
+	u32 hw_len;
+	int id;
+	u16 prio;
+	u16 rule_id;
+};
+
+/**
+ * struct ipa3_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ * @id: routing table id
+ * @rule_ids: idr structure that holds the rule_id for each rule
+ */
+struct ipa3_rt_tbl {
+	struct list_head link;
+	struct list_head head_rt_rule_list;
+	char name[IPA_RESOURCE_NAME_MAX];
+	u32 idx;
+	u32 rule_cnt;
+	u32 ref_cnt;
+	struct ipa3_rt_tbl_set *set;
+	u32 cookie;
+	bool in_sys[IPA_RULE_TYPE_MAX];
+	u32 sz[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+	int id;
+	struct idr rule_ids;
+};
+
+/**
+ * struct ipa3_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @type: l2 header type
+ * @is_partial: flag indicating if header table entry is partial
+ * @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
+ * true - hdr entry resides in DDR and pointed to by proc ctx
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
+ * else 0
+ * @proc_ctx: processing context header
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of this header entry
+ * @id: header entry id
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa3_hdr_entry {
+	struct list_head link;
+	u8 hdr[IPA_HDR_MAX_SIZE];
+	u32 hdr_len;
+	char name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_hdr_l2_type type;
+	u8 is_partial;
+	bool is_hdr_proc_ctx;
+	dma_addr_t phys_base;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+	struct ipa_hdr_offset_entry *offset_entry;
+	u32 cookie;
+	u32 ref_cnt;
+	int id;
+	u8 is_eth2_ofst_valid;
+	u16 eth2_ofst;
+};
+
+/**
+ * struct ipa3_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the last header index
+ */
+struct ipa3_hdr_tbl {
+	struct list_head head_hdr_entry_list;
+	struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+	u32 hdr_cnt;
+	u32 end;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_offset_entry - IPA proc ctx header offset entry
+ * @link: entry's link in global processing context header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa3_hdr_proc_ctx_offset_entry {
+	struct list_head link;
+	u32 offset;
+	u32 bin;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_entry - IPA processing context header table entry
+ * @link: entry's link in global header table entries list
+ * @type: processing context type
+ * @offset_entry: entry's offset
+ * @hdr: the header
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of this processing context entry
+ * @id: processing context header entry id
+ */
+struct ipa3_hdr_proc_ctx_entry {
+	struct list_head link;
+	enum ipa_hdr_proc_type type;
+	struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
+	struct ipa3_hdr_entry *hdr;
+	u32 cookie;
+	u32 ref_cnt;
+	int id;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_tbl - IPA processing context header table
+ * @head_proc_ctx_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @proc_ctx_cnt: number of processing context headers
+ * @end: the last processing context header index
+ * @start_offset: offset in words of processing context header table
+ */
+struct ipa3_hdr_proc_ctx_tbl {
+	struct list_head head_proc_ctx_entry_list;
+	struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+	u32 proc_ctx_cnt;
+	u32 end;
+	u32 start_offset;
+};
+
+/**
+ * struct ipa3_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter tables
+ * @sticky_rear: flag indicating whether the last rule of the table must
+ *  remain at the rear of the table
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ * @rule_ids: idr structure that holds the rule_id for each rule
+ */
+struct ipa3_flt_tbl {
+	struct list_head head_flt_rule_list;
+	u32 rule_cnt;
+	bool in_sys[IPA_RULE_TYPE_MAX];
+	u32 sz[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+	bool sticky_rear;
+	struct idr rule_ids;
+};
+
+/**
+ * struct ipa3_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table
+ * @proc_ctx: processing context table
+ * @hw_len: the length of the table
+ * @id: rule handle - globally unique
+ * @prio: rule 10bit priority which defines the order of the rule
+ *  among other rules in the same integrated table
+ * @rule_id: rule 10bit ID to be returned in packet status
+ */
+struct ipa3_rt_entry {
+	struct list_head link;
+	struct ipa_rt_rule rule;
+	u32 cookie;
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_hdr_entry *hdr;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+	u32 hw_len;
+	int id;
+	u16 prio;
+	u16 rule_id;
+};
+
+/**
+ * struct ipa3_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa3_rt_tbl_set {
+	struct list_head head_rt_tbl_list;
+	u32 tbl_cnt;
+};
+
+/**
+ * struct ipa3_wlan_stats - Wlan stats for each wlan endpoint
+ * @rx_pkts_rcvd: Packets sent by wlan driver
+ * @rx_pkts_status_rcvd: Status packets received from ipa hw
+ * @rx_hd_processed: Data Descriptors processed by IPA Driver
+ * @rx_hd_reply: Data Descriptors recycled by wlan driver
+ * @rx_hd_rcvd: Data Descriptors sent by wlan driver
+ * @rx_pkt_leak: Count of packets that were not recycled
+ * @rx_dp_fail: Packets that failed to transfer to IPA HW
+ * @tx_pkts_rcvd: SKB Buffers received from ipa hw
+ * @tx_pkts_sent: SKB Buffers sent to wlan driver
+ * @tx_pkts_dropped: Dropped packets count
+ */
+struct ipa3_wlan_stats {
+	u32 rx_pkts_rcvd;
+	u32 rx_pkts_status_rcvd;
+	u32 rx_hd_processed;
+	u32 rx_hd_reply;
+	u32 rx_hd_rcvd;
+	u32 rx_pkt_leak;
+	u32 rx_dp_fail;
+	u32 tx_pkts_rcvd;
+	u32 tx_pkts_sent;
+	u32 tx_pkts_dropped;
+};
+
+/**
+ * struct ipa3_wlan_comm_memb - Wlan comm members
+ * @wlan_spinlock: protects wlan comm buff list and its size
+ * @ipa_tx_mul_spinlock: protects tx dp mul transfer
+ * @wlan_comm_total_cnt: wlan common skb buffers allocated count
+ * @wlan_comm_free_cnt: wlan common skb buffer free count
+ * @total_tx_pkts_freed: Recycled Buffer count
+ * @wlan_comm_desc_list: wlan common skb buffer list
+ */
+struct ipa3_wlan_comm_memb {
+	spinlock_t wlan_spinlock;
+	spinlock_t ipa_tx_mul_spinlock;
+	u32 wlan_comm_total_cnt;
+	u32 wlan_comm_free_cnt;
+	u32 total_tx_pkts_freed;
+	struct list_head wlan_comm_desc_list;
+	atomic_t active_clnt_cnt;
+};
+
+struct ipa_gsi_ep_mem_info {
+	u16 evt_ring_len;
+	u64 evt_ring_base_addr;
+	void *evt_ring_base_vaddr;
+	u16 chan_ring_len;
+	u64 chan_ring_base_addr;
+	void *chan_ring_base_vaddr;
+};
+
+struct ipa3_status_stats {
+	struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
+	int curr;
+};
+
+/**
+ * struct ipa3_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @gsi_chan_hdl: EP's GSI channel handle
+ * @gsi_evt_ring_hdl: EP's GSI channel event ring handle
+ * @gsi_mem_info: EP's GSI channel rings info
+ * @chan_scratch: EP's GSI channel scratch info
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information which will be forwarded once the user is
+ *        notified of new data availability
+ * @client_notify: user provided CB for EP event notification; the event is
+ *                 data received.
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ *  by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @disconnect_in_progress: Indicates client disconnect in progress.
+ * @qmi_request_sent: Indicates whether the QMI request to enable the clear
+ *					data path has been sent.
+ * @napi_enabled: when true, IPA calls the client callback to start polling
+ */
+struct ipa3_ep_context {
+	int valid;
+	enum ipa_client_type client;
+	struct sps_pipe *ep_hdl;
+	unsigned long gsi_chan_hdl;
+	unsigned long gsi_evt_ring_hdl;
+	struct ipa_gsi_ep_mem_info gsi_mem_info;
+	union __packed gsi_channel_scratch chan_scratch;
+	bool bytes_xfered_valid;
+	u16 bytes_xfered;
+	dma_addr_t phys_base;
+	struct ipa_ep_cfg cfg;
+	struct ipa_ep_cfg_holb holb;
+	struct ipahal_reg_ep_cfg_status status;
+	u32 dst_pipe_index;
+	u32 rt_tbl_idx;
+	struct sps_connect connect;
+	void *priv;
+	void (*client_notify)(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data);
+	bool desc_fifo_in_pipe_mem;
+	bool data_fifo_in_pipe_mem;
+	u32 desc_fifo_pipe_mem_ofst;
+	u32 data_fifo_pipe_mem_ofst;
+	bool desc_fifo_client_allocated;
+	bool data_fifo_client_allocated;
+	atomic_t avail_fifo_desc;
+	u32 dflt_flt4_rule_hdl;
+	u32 dflt_flt6_rule_hdl;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+	struct ipa3_wlan_stats wstats;
+	u32 uc_offload_state;
+	bool disconnect_in_progress;
+	u32 qmi_request_sent;
+	bool napi_enabled;
+	bool switch_to_intr;
+	int inactive_cycles;
+	u32 eot_in_poll_err;
+
+	/* sys MUST be the last element of this struct */
+	struct ipa3_sys_context *sys;
+};
+
+/**
+ * struct ipa_request_gsi_channel_params - xDCI channel related properties
+ *
+ * @ipa_ep_cfg:          IPA EP configuration
+ * @client:              type of "client"
+ * @priv:                callback cookie
+ * @notify:              callback
+ *           priv - callback cookie
+ *           evt - type of event
+ *           data - data relevant to event. May not be valid. See
+ *           event_type enum for valid cases.
+ * @skip_ep_cfg:         boolean field that determines if EP should be
+ *                       configured by IPA driver
+ * @keep_ipa_awake:      when true, IPA will not be clock gated
+ * @evt_ring_params:     parameters for the channel's event ring
+ * @evt_scratch:         parameters for the channel's event ring scratch
+ * @chan_params:         parameters for the channel
+ * @chan_scratch:        parameters for the channel's scratch
+ *
+ */
+struct ipa_request_gsi_channel_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	void *priv;
+	ipa_notify_cb notify;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+	struct gsi_evt_ring_props evt_ring_params;
+	union __packed gsi_evt_scratch evt_scratch;
+	struct gsi_chan_props chan_params;
+	union __packed gsi_channel_scratch chan_scratch;
+};
+
+enum ipa3_sys_pipe_policy {
+	IPA_POLICY_INTR_MODE,
+	IPA_POLICY_NOINTR_MODE,
+	IPA_POLICY_INTR_POLL_MODE,
+};
+
+struct ipa3_repl_ctx {
+	struct ipa3_rx_pkt_wrapper **cache;
+	atomic_t head_idx;
+	atomic_t tail_idx;
+	u32 capacity;
+};
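+
+/*
+ * Illustrative sketch (not part of this patch): ipa3_repl_ctx is a ring of
+ * pre-allocated rx wrappers indexed by the atomic head/tail counters; the
+ * ring is empty when the two indices are equal. Assuming the consumer pops
+ * entries at head_idx (the exact producer/consumer split lives in ipa_dp.c),
+ * popping one entry would look roughly like this, error handling omitted:
+ *
+ *	struct ipa3_rx_pkt_wrapper *pkt = NULL;
+ *	u32 head = atomic_read(&sys->repl.head_idx);
+ *
+ *	if (head != atomic_read(&sys->repl.tail_idx)) {
+ *		pkt = sys->repl.cache[head];
+ *		atomic_set(&sys->repl.head_idx,
+ *			   (head + 1) % sys->repl.capacity);
+ *	}
+ */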
+
+/**
+ * struct ipa3_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ *
+ * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN
+ */
+struct ipa3_sys_context {
+	u32 len;
+	struct sps_register_event event;
+	atomic_t curr_polling_state;
+	struct delayed_work switch_to_intr_work;
+	enum ipa3_sys_pipe_policy policy;
+	int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
+	struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
+	void (*free_skb)(struct sk_buff *skb);
+	void (*free_rx_wrapper)(struct ipa3_rx_pkt_wrapper *rk_pkt);
+	u32 rx_buff_sz;
+	u32 rx_pool_sz;
+	struct sk_buff *prev_skb;
+	unsigned int len_rem;
+	unsigned int len_pad;
+	unsigned int len_partial;
+	bool drop_packet;
+	struct work_struct work;
+	void (*sps_callback)(struct sps_event_notify *notify);
+	enum sps_option sps_option;
+	struct delayed_work replenish_rx_work;
+	struct work_struct repl_work;
+	void (*repl_hdlr)(struct ipa3_sys_context *sys);
+	struct ipa3_repl_ctx repl;
+
+	/* ordering is important - mutable fields go above */
+	struct ipa3_ep_context *ep;
+	struct list_head head_desc_list;
+	struct list_head rcycl_list;
+	spinlock_t spinlock;
+	struct workqueue_struct *wq;
+	struct workqueue_struct *repl_wq;
+	struct ipa3_status_stats *status_stat;
+	/* ordering is important - other immutable fields go below */
+};
+
+/**
+ * enum ipa3_desc_type - IPA descriptor type
+ *
+ * IPA descriptor types; IPA supports DD and ICD but no CD
+ */
+enum ipa3_desc_type {
+	IPA_DATA_DESC,
+	IPA_DATA_DESC_SKB,
+	IPA_DATA_DESC_SKB_PAGED,
+	IPA_IMM_CMD_DESC,
+};
+
+/**
+ * struct ipa3_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: specify if this packet is for the skb or immediate command
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ * >1 and <0xFFFF for first of a "multiple" transfer,
+ * 0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ * @no_unmap_dma: when true, the buffer will not be DMA unmapped
+ *
+ * This struct can wrap both data packet and immediate command packet.
+ */
+struct ipa3_tx_pkt_wrapper {
+	enum ipa3_desc_type type;
+	struct ipa_mem_buffer mem;
+	struct work_struct work;
+	struct list_head link;
+	void (*callback)(void *user1, int user2);
+	void *user1;
+	int user2;
+	struct ipa3_sys_context *sys;
+	struct ipa_mem_buffer mult;
+	u32 cnt;
+	void *bounce;
+	bool no_unmap_dma;
+};
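+
+/*
+ * Illustrative note (not part of this patch): following the @cnt scheme
+ * documented above, a "multiple" transfer of three descriptors would chain
+ * three wrappers whose @cnt fields are set as:
+ *
+ *	first wrapper:  cnt = 3      (also carries @mult for the transfer)
+ *	middle wrapper: cnt = 0
+ *	last wrapper:   cnt = 0xFFFF
+ */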
+
+/**
+ * struct ipa3_dma_xfer_wrapper - IPADMA transfer descr wrapper
+ * @phys_addr_src: physical address of the source data to copy
+ * @phys_addr_dest: physical address to store the copied data
+ * @len: len in bytes to copy
+ * @link: linked to the wrappers list on the proper (sync/async) cons pipe
+ * @xfer_done: completion object for sync_memcpy completion
+ * @callback: IPADMA client provided completion callback
+ * @user1: cookie1 for above callback
+ *
+ * This struct can wrap both sync and async memcpy transfers descriptors.
+ */
+struct ipa3_dma_xfer_wrapper {
+	u64 phys_addr_src;
+	u64 phys_addr_dest;
+	u16 len;
+	struct list_head link;
+	struct completion xfer_done;
+	void (*callback)(void *user1);
+	void *user1;
+};
+
+/**
+ * struct ipa3_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb or kmalloc'ed immediate command parameters/plain old
+ *  data
+ * @frag: points to paged fragment
+ * @dma_address: dma mapped address of pyld
+ * @dma_address_valid: valid field for dma_address
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa3_desc {
+	enum ipa3_desc_type type;
+	void *pyld;
+	skb_frag_t *frag;
+	dma_addr_t dma_address;
+	bool dma_address_valid;
+	u16 len;
+	u16 opcode;
+	void (*callback)(void *user1, int user2);
+	void *user1;
+	int user2;
+	struct completion xfer_done;
+};
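+
+/*
+ * Illustrative sketch (not part of this patch): an immediate command is
+ * typically wrapped in an ipa3_desc and pushed with ipa3_send_cmd(),
+ * declared later in this header. cmd_pyld, cmd_len and cmd_opcode below
+ * are placeholders.
+ *
+ *	struct ipa3_desc desc;
+ *
+ *	memset(&desc, 0, sizeof(desc));
+ *	desc.type = IPA_IMM_CMD_DESC;
+ *	desc.pyld = cmd_pyld;
+ *	desc.len = cmd_len;
+ *	desc.opcode = cmd_opcode;
+ *	if (ipa3_send_cmd(1, &desc))
+ *		IPAERR("failed to send immediate command\n");
+ */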
+
+/**
+ * struct ipa3_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @link: linked to the Rx packets on that pipe
+ * @data: skb and DMA address of this Rx packet
+ * @len: how many bytes are copied into skb's flat buffer
+ * @work: work struct for current Rx packet
+ * @sys: corresponding IPA sys context
+ */
+struct ipa3_rx_pkt_wrapper {
+	struct list_head link;
+	struct ipa_rx_data data;
+	u32 len;
+	struct work_struct work;
+	struct ipa3_sys_context *sys;
+};
+
+/**
+ * struct ipa3_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the device structure
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ * @nat_base_address: nat table virtual address
+ * @ipv4_rules_addr: base nat table address
+ * @ipv4_expansion_rules_addr: expansion table address
+ * @index_table_addr: index table address
+ * @index_table_expansion_addr: index expansion table address
+ * @size_base_tables: base table size
+ * @size_expansion_tables: expansion table size
+ * @public_ip_addr: ip address of nat table
+ */
+struct ipa3_nat_mem {
+	struct class *class;
+	struct device *dev;
+	struct cdev cdev;
+	dev_t dev_num;
+	void *vaddr;
+	dma_addr_t dma_handle;
+	size_t size;
+	bool is_mapped;
+	bool is_sys_mem;
+	bool is_dev_init;
+	bool is_dev;
+	struct mutex lock;
+	void *nat_base_address;
+	char *ipv4_rules_addr;
+	char *ipv4_expansion_rules_addr;
+	char *index_table_addr;
+	char *index_table_expansion_addr;
+	u32 size_base_tables;
+	u32 size_expansion_tables;
+	u32 public_ip_addr;
+	void *tmp_vaddr;
+	dma_addr_t tmp_dma_handle;
+	bool is_tmp_mem;
+};
+
+/**
+ * enum ipa3_hw_mode - IPA hardware mode
+ * @IPA_HW_MODE_NORMAL: Regular IPA hardware
+ * @IPA_HW_MODE_VIRTUAL: IPA hardware supporting virtual memory allocation
+ * @IPA_HW_MODE_PCIE: IPA hardware supporting memory allocation over PCIE
+ *	Bridge
+ */
+enum ipa3_hw_mode {
+	IPA_HW_MODE_NORMAL  = 0,
+	IPA_HW_MODE_VIRTUAL = 1,
+	IPA_HW_MODE_PCIE    = 2
+};
+
+enum ipa3_config_this_ep {
+	IPA_CONFIGURE_THIS_EP,
+	IPA_DO_NOT_CONFIGURE_THIS_EP,
+};
+
+struct ipa3_stats {
+	u32 tx_sw_pkts;
+	u32 tx_hw_pkts;
+	u32 rx_pkts;
+	u32 rx_excp_pkts[IPAHAL_PKT_STATUS_EXCEPTION_MAX];
+	u32 rx_repl_repost;
+	u32 tx_pkts_compl;
+	u32 rx_q_len;
+	u32 msg_w[IPA_EVENT_MAX_NUM];
+	u32 msg_r[IPA_EVENT_MAX_NUM];
+	u32 stat_compl;
+	u32 aggr_close;
+	u32 wan_aggr_close;
+	u32 wan_rx_empty;
+	u32 wan_repl_rx_empty;
+	u32 lan_rx_empty;
+	u32 lan_repl_rx_empty;
+	u32 flow_enable;
+	u32 flow_disable;
+	u32 tx_non_linear;
+};
+
+struct ipa3_active_clients {
+	struct mutex mutex;
+	spinlock_t spinlock;
+	bool mutex_locked;
+	int cnt;
+};
+
+struct ipa3_wakelock_ref_cnt {
+	spinlock_t spinlock;
+	int cnt;
+};
+
+struct ipa3_tag_completion {
+	struct completion comp;
+	atomic_t cnt;
+};
+
+struct ipa3_controller;
+
+/**
+ * struct ipa3_uc_hdlrs - IPA uC callback functions
+ * @ipa_uc_loaded_hdlr: Function handler when uC is loaded
+ * @ipa_uc_event_hdlr: Event handler function
+ * @ipa3_uc_response_hdlr: Response handler function
+ * @ipa_uc_event_log_info_hdlr: Log event handler function
+ */
+struct ipa3_uc_hdlrs {
+	void (*ipa_uc_loaded_hdlr)(void);
+
+	void (*ipa_uc_event_hdlr)
+		(struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio);
+
+	int (*ipa3_uc_response_hdlr)
+		(struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio,
+		u32 *uc_status);
+
+	void (*ipa_uc_event_log_info_hdlr)
+		(struct IpaHwEventLogInfoData_t *uc_event_top_mmio);
+};
+
+/**
+ * enum ipa3_hw_flags - flags which defines the behavior of HW
+ *
+ * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert
+ *	failure.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported
+ *	in the event ring only. No event to CPU.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event
+ *	IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST
+ * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by
+ *	QMB (avoid memcpy)
+ * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in
+ *	IN Channel
+ * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is
+ *	entering a mode where it expects a doorbell to be rung for OUT Channel
+ * @IPA_HW_FLAG_NO_START_OOB_TIMER: If set, do not start the OOB timer
+ */
+enum ipa3_hw_flags {
+	IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE	= 0x01,
+	IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR		= 0x02,
+	IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP	= 0x04,
+	IPA_HW_FLAG_WORK_OVER_DDR			= 0x08,
+	IPA_HW_FLAG_NO_REPORT_OOB			= 0x10,
+	IPA_HW_FLAG_NO_REPORT_DB_MODE			= 0x20,
+	IPA_HW_FLAG_NO_START_OOB_TIMER			= 0x40
+};
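+
+/*
+ * Illustrative sketch (not part of this patch): the flags above form a
+ * bitmask and are OR-ed together before being handed to the uC, e.g. via
+ * ipa3_uc_update_hw_flags() declared later in this header:
+ *
+ *	u32 flags = IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE |
+ *		    IPA_HW_FLAG_WORK_OVER_DDR;
+ *
+ *	if (ipa3_uc_update_hw_flags(flags))
+ *		IPAERR("failed to update uC HW flags\n");
+ */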
+
+/**
+ * struct ipa3_uc_ctx - IPA uC context
+ * @uc_inited: Indicates if uC interface has been initialized
+ * @uc_loaded: Indicates if uC has loaded
+ * @uc_failed: Indicates if uC has failed / returned an error
+ * @uc_lock: uC interface lock to allow only one uC interaction at a time
+ * @uc_spinlock: same as uc_lock but for irq contexts
+ * @uc_completion: Completion mechanism to wait for uC commands
+ * @uc_sram_mmio: Pointer to uC mapped memory
+ * @pending_cmd: The last command sent waiting to be ACKed
+ * @uc_status: The last status provided by the uC
+ * @uc_error_type: error type from uC error event
+ * @uc_error_timestamp: tag timer sampled after uC crashed
+ */
+struct ipa3_uc_ctx {
+	bool uc_inited;
+	bool uc_loaded;
+	bool uc_failed;
+	struct mutex uc_lock;
+	spinlock_t uc_spinlock;
+	struct completion uc_completion;
+	struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio;
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio;
+	u32 uc_event_top_ofst;
+	u32 pending_cmd;
+	u32 uc_status;
+	u32 uc_error_type;
+	u32 uc_error_timestamp;
+	phys_addr_t rdy_ring_base_pa;
+	phys_addr_t rdy_ring_rp_pa;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_comp_ring_base_pa;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
+};
+
+/**
+ * struct ipa3_uc_wdi_ctx - uC WDI context
+ * @wdi_uc_stats_ofst: offset of the WDI stats in the uC shared memory
+ * @wdi_uc_stats_mmio: mapped WDI stats in the uC shared memory
+ * @priv: private data passed to the uC ready callback
+ * @uc_ready_cb: callback invoked when the uC becomes ready
+ */
+struct ipa3_uc_wdi_ctx {
+	/* WDI specific fields */
+	u32 wdi_uc_stats_ofst;
+	struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * struct ipa3_transport_pm - transport power management related members
+ * @lock: lock for ensuring atomic operations
+ * @res_granted: true if SPS requested IPA resource and IPA granted it
+ * @res_rel_in_prog: true if releasing IPA resource is in progress
+ */
+struct ipa3_transport_pm {
+	spinlock_t lock;
+	bool res_granted;
+	bool res_rel_in_prog;
+	atomic_t dec_clients;
+	atomic_t eot_activity;
+};
+
+/**
+ * struct ipa3cm_client_info - the client-info indicated from IPACM
+ * @client_enum: the enum to indicate tether-client
+ * @uplink: the bool to indicate pipe for uplink
+ */
+struct ipa3cm_client_info {
+	enum ipacm_client_enum client_enum;
+	bool uplink;
+};
+
+struct ipa3_smp2p_info {
+	u32 out_base_id;
+	u32 in_base_id;
+	bool res_sent;
+};
+
+/**
+ * struct ipa3_ready_cb_info - A list of all the registrations
+ *  for an indication of IPA driver readiness
+ *
+ * @link: linked list link
+ * @ready_cb: callback
+ * @user_data: User data
+ *
+ */
+struct ipa3_ready_cb_info {
+	struct list_head link;
+	ipa_ready_cb ready_cb;
+	void *user_data;
+};
+
+/**
+ * struct ipa3_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the device structure
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @skip_ep_cfg_shadow: state to update filter table correctly across
+ *  power-save
+ * @ep_flt_bitmap: End-points supporting filtering bitmap
+ * @ep_flt_num: End-points supporting filtering number
+ * @resume_on_connect: resume ep on ipa3_connect
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @hdr_tbl: IPA header table
+ * @hdr_proc_ctx_tbl: IPA processing context table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @hdr_proc_ctx_cache: processing context cache
+ * @hdr_proc_ctx_offset_cache: processing context offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa3_sys_context
+ * @smem_sz: shared memory size available for SW use starting
+ *  from non-restricted bytes
+ * @smem_restricted_bytes: the bytes that SW should not use in the shared mem
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system
+ * @hdr_mem: header memory
+ * @hdr_proc_ctx_mem: processing context memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @power_mgmt_wq: workqueue for power management
+ * @transport_power_mgmt_wq: workqueue transport related power management
+ * @tag_process_before_gating: indicates whether to start tag process before
+ *  gating IPA clocks
+ * @transport_pm: transport power management related information
+ * @disconnect_lock: protects LAN_CONS packet receive notification CB
+ * @pipe_mem_pool: pipe memory pool
+ * @dma_pool: special purpose DMA pool
+ * @ipa3_active_clients: structure for reference counting connected IPA clients
+ * @ipa_hw_type: type of IPA HW (e.g. IPA 1.0, IPA 1.1 etc.)
+ * @ipa3_hw_mode: mode of IPA HW (e.g. Normal, Virtual or over PCIe)
+ * @use_ipa_teth_bridge: use tethering bridge driver
+ * @ipa_bam_remote_mode: ipa bam is in remote mode
+ * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
+ * @logbuf: ipc log buffer for high priority messages
+ * @logbuf_low: ipc log buffer for low priority messages
+ * @ipa_wdi2: using wdi-2.0
+ * @use_64_bit_dma_mask: using 64bits dma mask
+ * @ipa_bus_hdl: msm driver handle for the data path bus
+ * @ctrl: holds the core specific operations based on
+ *  core version (vtable like)
+ * @enable_clock_scaling: flag indicating whether clock scaling is enabled
+ * @curr_ipa_clk_rate: ipa3_clk current rate
+ * @wcstats: wlan common buffer stats
+ * @uc_ctx: uC interface context
+ * @uc_wdi_ctx: WDI specific fields for uC interface
+ * @ipa_num_pipes: The number of pipes used by IPA HW
+ * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided
+ * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA
+ * @apply_rg10_wa: Indicates whether to use register group 10 workaround
+ * @gsi_ch20_wa: Indicates whether to apply GSI physical channel 20 workaround
+ * @w_lock: Indicates the wakeup source.
+ * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired
+ * @ipa_initialization_complete: Indicates that IPA is fully initialized
+ * @ipa_ready_cb_list: A list of all the clients who require a CB when IPA
+ *  driver is ready/initialized.
+ * @init_completion_obj: Completion object to be used in case IPA driver hasn't
+ *  finished initializing. Example of use - IOCTLs to /dev/ipa
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa3_context {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+	unsigned long bam_handle;
+	struct ipa3_ep_context ep[IPA3_MAX_NUM_PIPES];
+	bool skip_ep_cfg_shadow[IPA3_MAX_NUM_PIPES];
+	u32 ep_flt_bitmap;
+	u32 ep_flt_num;
+	bool resume_on_connect[IPA_CLIENT_MAX];
+	struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX];
+	void __iomem *mmio;
+	u32 ipa_wrapper_base;
+	u32 ipa_wrapper_size;
+	struct ipa3_hdr_tbl hdr_tbl;
+	struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
+	struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+	struct ipa3_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+	struct kmem_cache *flt_rule_cache;
+	struct kmem_cache *rt_rule_cache;
+	struct kmem_cache *hdr_cache;
+	struct kmem_cache *hdr_offset_cache;
+	struct kmem_cache *hdr_proc_ctx_cache;
+	struct kmem_cache *hdr_proc_ctx_offset_cache;
+	struct kmem_cache *rt_tbl_cache;
+	struct kmem_cache *tx_pkt_wrapper_cache;
+	struct kmem_cache *rx_pkt_wrapper_cache;
+	unsigned long rt_idx_bitmap[IPA_IP_MAX];
+	struct mutex lock;
+	u16 smem_sz;
+	u16 smem_restricted_bytes;
+	u16 smem_reqd_sz;
+	struct ipa3_nat_mem nat_mem;
+	u32 excp_hdr_hdl;
+	u32 dflt_v4_rt_rule_hdl;
+	u32 dflt_v6_rt_rule_hdl;
+	uint aggregation_type;
+	uint aggregation_byte_limit;
+	uint aggregation_time_limit;
+	bool hdr_tbl_lcl;
+	bool hdr_proc_ctx_tbl_lcl;
+	struct ipa_mem_buffer hdr_mem;
+	struct ipa_mem_buffer hdr_proc_ctx_mem;
+	bool ip4_rt_tbl_hash_lcl;
+	bool ip4_rt_tbl_nhash_lcl;
+	bool ip6_rt_tbl_hash_lcl;
+	bool ip6_rt_tbl_nhash_lcl;
+	bool ip4_flt_tbl_hash_lcl;
+	bool ip4_flt_tbl_nhash_lcl;
+	bool ip6_flt_tbl_hash_lcl;
+	bool ip6_flt_tbl_nhash_lcl;
+	struct gen_pool *pipe_mem_pool;
+	struct dma_pool *dma_pool;
+	struct ipa3_active_clients ipa3_active_clients;
+	struct ipa3_active_clients_log_ctx ipa3_active_clients_logging;
+	struct workqueue_struct *power_mgmt_wq;
+	struct workqueue_struct *transport_power_mgmt_wq;
+	bool tag_process_before_gating;
+	struct ipa3_transport_pm transport_pm;
+	u32 clnt_hdl_cmd;
+	u32 clnt_hdl_data_in;
+	u32 clnt_hdl_data_out;
+	spinlock_t disconnect_lock;
+	u8 a5_pipe_index;
+	struct list_head intf_list;
+	struct list_head msg_list;
+	struct list_head pull_msg_list;
+	struct mutex msg_lock;
+	wait_queue_head_t msg_waitq;
+	enum ipa_hw_type ipa_hw_type;
+	enum ipa3_hw_mode ipa3_hw_mode;
+	bool use_ipa_teth_bridge;
+	bool ipa_bam_remote_mode;
+	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
+	bool use_64_bit_dma_mask;
+	/* featurize if memory footprint becomes a concern */
+	struct ipa3_stats stats;
+	void *smem_pipe_mem;
+	void *logbuf;
+	void *logbuf_low;
+	u32 ipa_bus_hdl;
+	struct ipa3_controller *ctrl;
+	struct idr ipa_idr;
+	struct device *pdev;
+	struct device *uc_pdev;
+	spinlock_t idr_lock;
+	u32 enable_clock_scaling;
+	u32 curr_ipa_clk_rate;
+	bool q6_proxy_clk_vote_valid;
+	u32 ipa_num_pipes;
+
+	struct ipa3_wlan_comm_memb wc_memb;
+
+	struct ipa3_uc_ctx uc_ctx;
+
+	struct ipa3_uc_wdi_ctx uc_wdi_ctx;
+	struct ipa3_uc_ntn_ctx uc_ntn_ctx;
+	u32 wan_rx_ring_size;
+	u32 lan_rx_ring_size;
+	bool skip_uc_pipe_reset;
+	enum ipa_transport_type transport_prototype;
+	unsigned long gsi_dev_hdl;
+	u32 ee;
+	bool apply_rg10_wa;
+	bool gsi_ch20_wa;
+	bool smmu_present;
+	bool smmu_s1_bypass;
+	unsigned long peer_bam_iova;
+	phys_addr_t peer_bam_pa;
+	u32 peer_bam_map_size;
+	unsigned long peer_bam_dev;
+	u32 peer_bam_map_cnt;
+	u32 wdi_map_cnt;
+	struct wakeup_source w_lock;
+	struct ipa3_wakelock_ref_cnt wakelock_ref_cnt;
+	/* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */
+	bool ipa_client_apps_wan_cons_agg_gro;
+	/* M-release support to know client pipes */
+	struct ipa3cm_client_info ipacm_client[IPA3_MAX_NUM_PIPES];
+	bool tethered_flow_control;
+	bool ipa_initialization_complete;
+	struct list_head ipa_ready_cb_list;
+	struct completion init_completion_obj;
+	struct ipa3_smp2p_info smp2p_info;
+};
+
+/**
+ * enum ipa3_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa3_pipe_mem_type {
+	IPA_SPS_PIPE_MEM = 0,
+	IPA_PRIVATE_MEM  = 1,
+	IPA_SYSTEM_MEM   = 2,
+};
+
+struct ipa3_plat_drv_res {
+	bool use_ipa_teth_bridge;
+	u32 ipa_mem_base;
+	u32 ipa_mem_size;
+	u32 transport_mem_base;
+	u32 transport_mem_size;
+	u32 ipa_irq;
+	u32 transport_irq;
+	u32 ipa_pipe_mem_start_ofst;
+	u32 ipa_pipe_mem_size;
+	enum ipa_hw_type ipa_hw_type;
+	enum ipa3_hw_mode ipa3_hw_mode;
+	u32 ee;
+	bool ipa_bam_remote_mode;
+	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
+	bool use_64_bit_dma_mask;
+	u32 wan_rx_ring_size;
+	u32 lan_rx_ring_size;
+	bool skip_uc_pipe_reset;
+	enum ipa_transport_type transport_prototype;
+	bool apply_rg10_wa;
+	bool gsi_ch20_wa;
+	bool tethered_flow_control;
+};
+
+/**
+ * struct ipa3_mem_partition - represents IPA RAM Map as read from DTS
+ * Order and type of members should not be changed without a suitable change
+ * to DTS file or the code that reads it.
+ *
+ * IPA v3.0 SRAM memory layout:
+ * +-------------------------+
+ * |    UC INFO              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 FLT HDR HASHABLE     |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 FLT HDR NON-HASHABLE |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 FLT HDR HASHABLE     |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 FLT HDR NON-HASHABLE |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 RT HDR HASHABLE      |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 RT HDR NON-HASHABLE  |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 RT HDR HASHABLE      |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 RT HDR NON-HASHABLE  |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |  MODEM HDR              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | MODEM PROC CTX          |
+ * +-------------------------+
+ * | APPS PROC CTX           |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |  MODEM MEM              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ */
+struct ipa3_mem_partition {
+	u32 ofst_start;
+	u32 nat_ofst;
+	u32 nat_size;
+	u32 v4_flt_hash_ofst;
+	u32 v4_flt_hash_size;
+	u32 v4_flt_hash_size_ddr;
+	u32 v4_flt_nhash_ofst;
+	u32 v4_flt_nhash_size;
+	u32 v4_flt_nhash_size_ddr;
+	u32 v6_flt_hash_ofst;
+	u32 v6_flt_hash_size;
+	u32 v6_flt_hash_size_ddr;
+	u32 v6_flt_nhash_ofst;
+	u32 v6_flt_nhash_size;
+	u32 v6_flt_nhash_size_ddr;
+	u32 v4_rt_num_index;
+	u32 v4_modem_rt_index_lo;
+	u32 v4_modem_rt_index_hi;
+	u32 v4_apps_rt_index_lo;
+	u32 v4_apps_rt_index_hi;
+	u32 v4_rt_hash_ofst;
+	u32 v4_rt_hash_size;
+	u32 v4_rt_hash_size_ddr;
+	u32 v4_rt_nhash_ofst;
+	u32 v4_rt_nhash_size;
+	u32 v4_rt_nhash_size_ddr;
+	u32 v6_rt_num_index;
+	u32 v6_modem_rt_index_lo;
+	u32 v6_modem_rt_index_hi;
+	u32 v6_apps_rt_index_lo;
+	u32 v6_apps_rt_index_hi;
+	u32 v6_rt_hash_ofst;
+	u32 v6_rt_hash_size;
+	u32 v6_rt_hash_size_ddr;
+	u32 v6_rt_nhash_ofst;
+	u32 v6_rt_nhash_size;
+	u32 v6_rt_nhash_size_ddr;
+	u32 modem_hdr_ofst;
+	u32 modem_hdr_size;
+	u32 apps_hdr_ofst;
+	u32 apps_hdr_size;
+	u32 apps_hdr_size_ddr;
+	u32 modem_hdr_proc_ctx_ofst;
+	u32 modem_hdr_proc_ctx_size;
+	u32 apps_hdr_proc_ctx_ofst;
+	u32 apps_hdr_proc_ctx_size;
+	u32 apps_hdr_proc_ctx_size_ddr;
+	u32 modem_comp_decomp_ofst;
+	u32 modem_comp_decomp_size;
+	u32 modem_ofst;
+	u32 modem_size;
+	u32 apps_v4_flt_hash_ofst;
+	u32 apps_v4_flt_hash_size;
+	u32 apps_v4_flt_nhash_ofst;
+	u32 apps_v4_flt_nhash_size;
+	u32 apps_v6_flt_hash_ofst;
+	u32 apps_v6_flt_hash_size;
+	u32 apps_v6_flt_nhash_ofst;
+	u32 apps_v6_flt_nhash_size;
+	u32 uc_info_ofst;
+	u32 uc_info_size;
+	u32 end_ofst;
+	u32 apps_v4_rt_hash_ofst;
+	u32 apps_v4_rt_hash_size;
+	u32 apps_v4_rt_nhash_ofst;
+	u32 apps_v4_rt_nhash_size;
+	u32 apps_v6_rt_hash_ofst;
+	u32 apps_v6_rt_hash_size;
+	u32 apps_v6_rt_nhash_ofst;
+	u32 apps_v6_rt_nhash_size;
+};
+
+struct ipa3_controller {
+	struct ipa3_mem_partition mem_partition;
+	u32 ipa_clk_rate_turbo;
+	u32 ipa_clk_rate_nominal;
+	u32 ipa_clk_rate_svs;
+	u32 clock_scaling_bw_threshold_turbo;
+	u32 clock_scaling_bw_threshold_nominal;
+	u32 ipa_reg_base_ofst;
+	u32 max_holb_tmr_val;
+	void (*ipa_sram_read_settings)(void);
+	int (*ipa_init_sram)(void);
+	int (*ipa_init_hdr)(void);
+	int (*ipa_init_rt4)(void);
+	int (*ipa_init_rt6)(void);
+	int (*ipa_init_flt4)(void);
+	int (*ipa_init_flt6)(void);
+	int (*ipa3_read_ep_reg)(char *buff, int max_len, int pipe);
+	int (*ipa3_commit_flt)(enum ipa_ip_type ip);
+	int (*ipa3_commit_rt)(enum ipa_ip_type ip);
+	int (*ipa3_commit_hdr)(void);
+	void (*ipa3_enable_clks)(void);
+	void (*ipa3_disable_clks)(void);
+	struct msm_bus_scale_pdata *msm_bus_data_ptr;
+};
+
+extern struct ipa3_context *ipa3_ctx;
+
+/* public APIs */
+/*
+ * Connect / Disconnect
+ */
+int ipa3_connect(const struct ipa_connect_params *in,
+		struct ipa_sps_params *sps,
+		u32 *clnt_hdl);
+int ipa3_disconnect(u32 clnt_hdl);
+
+/* Generic GSI channels functions */
+int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
+			     struct ipa_req_chan_out_params *out_params);
+
+int ipa3_release_gsi_channel(u32 clnt_hdl);
+
+int ipa3_start_gsi_channel(u32 clnt_hdl);
+
+int ipa3_stop_gsi_channel(u32 clnt_hdl);
+
+int ipa3_reset_gsi_channel(u32 clnt_hdl);
+
+int ipa3_reset_gsi_event_ring(u32 clnt_hdl);
+
+/* Specific xDCI channels functions */
+int ipa3_set_usb_max_packet_size(
+	enum ipa_usb_max_usb_packet_size usb_max_packet_size);
+
+int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid);
+
+int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
+
+int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	bool should_force_clear, u32 qmi_req_id, bool is_dpl);
+
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl);
+
+/*
+ * Resume / Suspend
+ */
+int ipa3_reset_endpoint(u32 clnt_hdl);
+
+/*
+ * Remove ep delay
+ */
+int ipa3_clear_endpoint_delay(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
+			const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
+		      const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
+
+int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+/*
+ * Header removal / addition
+ */
+int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa3_commit_hdr(void);
+
+int ipa3_reset_hdr(void);
+
+int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa3_put_hdr(u32 hdr_hdl);
+
+int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Header Processing Context
+ */
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+/*
+ * Routing
+ */
+int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
+
+int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa3_commit_rt(enum ipa_ip_type ip);
+
+int ipa3_reset_rt(enum ipa_ip_type ip);
+
+int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa3_put_rt_tbl(u32 rt_tbl_hdl);
+
+int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
+
+int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
+
+/*
+ * Filtering
+ */
+int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules);
+
+int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
+
+int ipa3_commit_flt(enum ipa_ip_type ip);
+
+int ipa3_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Messaging
+ */
+int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback);
+int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
+int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta);
+
+/*
+ * Interface
+ */
+int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx);
+int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx,
+		       const struct ipa_ext_intf *ext);
+int ipa3_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+int ipa3_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa3_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa3_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * Data path
+ */
+int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+/*
+ * To transfer multiple data packets.
+ * While passing the data descriptor list, the anchor node
+ * should be of type struct ipa_tx_data_desc, not list_head.
+ */
+int ipa3_tx_dp_mul(enum ipa_client_type dst,
+			struct ipa_tx_data_desc *data_desc);
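+
+/*
+ * Illustrative sketch (not part of this patch): descriptors are chained off
+ * an anchor of type struct ipa_tx_data_desc (not a bare list_head) and the
+ * anchor is handed to ipa3_tx_dp_mul(). The member names used below (link,
+ * pyld_buffer, pyld_len) and the destination client are assumptions made
+ * for the sake of the example.
+ *
+ *	struct ipa_tx_data_desc anchor, desc1;
+ *
+ *	INIT_LIST_HEAD(&anchor.link);
+ *	desc1.pyld_buffer = buf;
+ *	desc1.pyld_len = len;
+ *	list_add_tail(&desc1.link, &anchor.link);
+ *	if (ipa3_tx_dp_mul(IPA_CLIENT_WLAN1_CONS, &anchor))
+ *		IPAERR("tx_dp_mul failed\n");
+ */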
+
+void ipa3_free_skb(struct ipa_rx_data *);
+
+/*
+ * System pipes
+ */
+int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa3_teardown_sys_pipe(u32 clnt_hdl);
+
+int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
+	unsigned long *ipa_bam_hdl,
+	u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
+
+int ipa3_sys_teardown(u32 clnt_hdl);
+
+int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl);
+
+int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out);
+int ipa3_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa3_enable_wdi_pipe(u32 clnt_hdl);
+int ipa3_disable_wdi_pipe(u32 clnt_hdl);
+int ipa3_resume_wdi_pipe(u32 clnt_hdl);
+int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+		ipa_notify_cb notify, void *priv, u8 hdr_len,
+		struct ipa_ntn_conn_out_params *outp);
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+/*
+ * To retrieve doorbell physical address of
+ * wlan pipes
+ */
+int ipa3_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * To check uC readiness and register a uC ready callback.
+ * The callback is registered only if the uC is not yet ready.
+ */
+int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa3_uc_dereg_rdyCB(void);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int ipa3_teth_bridge_init(struct teth_bridge_init_params *params);
+
+int ipa3_teth_bridge_disconnect(enum ipa_client_type client);
+
+int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
+/*
+ * Tethering client info
+ */
+void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink);
+
+enum ipacm_client_enum ipa3_get_client(int pipe_idx);
+
+bool ipa3_get_client_uplink(int pipe_idx);
+
+/*
+ * IPADMA
+ */
+int ipa3_dma_init(void);
+
+int ipa3_dma_enable(void);
+
+int ipa3_dma_disable(void);
+
+int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
+			void (*user_cb)(void *user1), void *user_param);
+
+int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+
+void ipa3_dma_destroy(void);
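+
+/*
+ * Illustrative sketch (not part of this patch): typical IPADMA usage is to
+ * init and enable the engine once, issue copies, then tear it down. The
+ * physical addresses and length below are placeholders.
+ *
+ *	if (ipa3_dma_init() || ipa3_dma_enable())
+ *		return -EFAULT;
+ *	if (ipa3_dma_sync_memcpy(dest_pa, src_pa, len))
+ *		IPAERR("sync memcpy failed\n");
+ *	ipa3_dma_disable();
+ *	ipa3_dma_destroy();
+ */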
+
+/*
+ * MHI
+ */
+
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params);
+
+int ipa3_connect_mhi_pipe(
+		struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl);
+
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl);
+
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client);
+
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client);
+
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client);
+
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client);
+
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index);
+
+int ipa3_mhi_destroy_channel(enum ipa_client_type client);
+
+/*
+ * mux id
+ */
+int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
+
+/*
+ * interrupts
+ */
+int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data);
+
+int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt);
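+
+/*
+ * Illustrative sketch (not part of this patch): a client registers a
+ * handler for one interrupt type; with deferred_flag set to true the
+ * handler runs from workqueue context instead of the ISR path. The handler
+ * below is hypothetical.
+ *
+ *	static void my_suspend_cb(enum ipa_irq_type interrupt,
+ *				  void *private_data, void *interrupt_data)
+ *	{
+ *		IPADBG("TX_SUSPEND interrupt received\n");
+ *	}
+ *
+ *	if (ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, my_suspend_cb,
+ *				       true, NULL))
+ *		IPAERR("failed to register TX_SUSPEND handler\n");
+ */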
+
+/*
+ * Miscellaneous
+ */
+void ipa3_bam_reg_dump(void);
+
+int ipa3_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa3_is_ready(void);
+
+void ipa3_proxy_clk_vote(void);
+void ipa3_proxy_clk_unvote(void);
+
+bool ipa3_is_client_handle_valid(u32 clnt_hdl);
+
+enum ipa_client_type ipa3_get_client_mapping(int pipe_idx);
+
+void ipa_init_ep_flt_bitmap(void);
+
+bool ipa_is_ep_support_flt(int pipe_idx);
+
+enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx);
+
+bool ipa3_get_modem_cfg_emb_pipe_flt(void);
+
+u8 ipa3_get_qmb_master_sel(enum ipa_client_type client);
+
+/* internal functions */
+
+int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+	struct ipa_api_controller *api_ctrl);
+
+bool ipa_is_modem_pipe(int pipe_idx);
+
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+		bool in_atomic);
+int ipa3_send(struct ipa3_sys_context *sys,
+		u32 num_desc,
+		struct ipa3_desc *desc,
+		bool in_atomic);
+int ipa3_get_ep_mapping(enum ipa_client_type client);
+int ipa_get_ep_group(enum ipa_client_type client);
+
+int ipa3_generate_hw_rule(enum ipa_ip_type ip,
+			 const struct ipa_rule_attrib *attrib,
+			 u8 **buf,
+			 u16 *en_rule);
+int ipa3_init_hw(void);
+struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+int ipa3_set_single_ndp_per_mbim(bool);
+void ipa3_debugfs_init(void);
+void ipa3_debugfs_remove(void);
+
+void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+	ipa3_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
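+
+/*
+ * Illustrative usage (not part of this patch): IPA_DUMP_BUFF() expands to a
+ * real dump only when IPA_DEBUG is defined and to a no-op otherwise, so a
+ * caller can leave the call in place unconditionally, e.g. for a mapped
+ * ipa_mem_buffer named mem:
+ *
+ *	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+ */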
+int ipa3_init_mem_partition(struct device_node *dev_node);
+int ipa3_controller_static_bind(struct ipa3_controller *controller,
+		enum ipa_hw_type ipa_hw_type);
+int ipa3_cfg_route(struct ipahal_reg_route *route);
+int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr);
+int ipa3_cfg_filter(u32 disable);
+int ipa3_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa3_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa3_pipe_mem_free(u32 ofst, u32 size);
+int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa3_context *ipa3_get_ctx(void);
+void ipa3_enable_clks(void);
+void ipa3_disable_clks(void);
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+		*id);
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+		bool int_ctx);
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+		bool int_ctx);
+int ipa3_active_clients_log_print_buffer(char *buf, int size);
+int ipa3_active_clients_log_print_table(char *buf, int size);
+void ipa3_active_clients_log_clear(void);
+int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
+int __ipa3_del_rt_rule(u32 rule_hdl);
+int __ipa3_del_hdr(u32 hdr_hdl);
+int __ipa3_release_hdr(u32 hdr_hdl);
+int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl);
+int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe);
+void _ipa_enable_clks_v3_0(void);
+void _ipa_disable_clks_v3_0(void);
+struct device *ipa3_get_dma_dev(void);
+void ipa3_suspend_active_aggr_wa(u32 clnt_hdl);
+void ipa3_suspend_handler(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data);
+
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+
+ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
+		 loff_t *f_pos);
+int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
+int ipa3_query_intf(struct ipa_ioc_query_intf *lookup);
+int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx);
+int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx);
+int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext);
+
+void wwan_cleanup(void);
+
+int ipa3_teth_bridge_driver_init(void);
+void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+
+int _ipa_init_sram_v3_0(void);
+int _ipa_init_hdr_v3_0(void);
+int _ipa_init_rt4_v3(void);
+int _ipa_init_rt6_v3(void);
+int _ipa_init_flt4_v3(void);
+int _ipa_init_flt6_v3(void);
+
+int __ipa_commit_flt_v3(enum ipa_ip_type ip);
+int __ipa_commit_rt_v3(enum ipa_ip_type ip);
+
+int __ipa_commit_hdr_v3_0(void);
+void ipa3_skb_recycle(struct sk_buff *skb);
+void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx);
+void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx);
+
+int ipa3_enable_data_path(u32 clnt_hdl);
+int ipa3_disable_data_path(u32 clnt_hdl);
+int ipa3_alloc_rule_id(struct idr *rule_ids);
+int ipa3_id_alloc(void *ptr);
+void *ipa3_id_find(u32 id);
+void ipa3_id_remove(u32 id);
+
+int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+				  u32 bandwidth_mbps);
+
+int ipa3_cfg_ep_status(u32 clnt_hdl,
+		const struct ipahal_reg_ep_cfg_status *ipa_ep_cfg);
+
+int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name name);
+int ipa3_suspend_resource_sync(enum ipa_rm_resource_name name);
+int ipa3_resume_resource(enum ipa_rm_resource_name name);
+bool ipa3_should_pipe_be_suspended(enum ipa_client_type client);
+int ipa3_tag_aggr_force_close(int pipe_num);
+
+void ipa3_active_clients_lock(void);
+int ipa3_active_clients_trylock(unsigned long *flags);
+void ipa3_active_clients_unlock(void);
+void ipa3_active_clients_trylock_unlock(unsigned long *flags);
+int ipa3_wdi_init(void);
+int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
+int ipa3_tag_process(struct ipa3_desc *desc, int num_descs,
+		    unsigned long timeout);
+
+void ipa3_q6_pre_shutdown_cleanup(void);
+void ipa3_q6_post_shutdown_cleanup(void);
+int ipa3_init_q6_smem(void);
+
+int ipa3_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+			 enum ipa_client_type ipa_client);
+
+int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info);
+
+int ipa3_uc_interface_init(void);
+int ipa3_uc_reset_pipe(enum ipa_client_type ipa_client);
+int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client);
+int ipa3_uc_state_check(void);
+int ipa3_uc_loaded_check(void);
+void ipa3_uc_load_notify(void);
+int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+		    bool polling_mode, unsigned long timeout_jiffies);
+void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
+			      struct ipa3_uc_hdlrs *hdlrs);
+int ipa3_create_nat_device(void);
+int ipa3_uc_notify_clk_state(bool enabled);
+void ipa3_dma_async_memcpy_notify_cb(void *priv,
+		enum ipa_dp_evt_type evt, unsigned long data);
+
+int ipa3_uc_update_hw_flags(u32 flags);
+
+int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+void ipa3_uc_mhi_cleanup(void);
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+	u32 first_evt_idx);
+int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+	int contexArrayIndex, int channelDirection);
+int ipa3_uc_mhi_reset_channel(int channelHandle);
+int ipa3_uc_mhi_suspend_channel(int channelHandle);
+int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected);
+int ipa3_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa3_uc_mhi_print_stats(char *dbg_buff, int size);
+int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+void ipa3_tag_destroy_imm(void *user1, int user2);
+struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx);
+void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
+
+u32 ipa3_get_num_pipes(void);
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void);
+struct iommu_domain *ipa3_get_smmu_domain(void);
+struct iommu_domain *ipa3_get_uc_smmu_domain(void);
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void);
+int ipa3_iommu_map(struct iommu_domain *domain, unsigned long iova,
+	phys_addr_t paddr, size_t size, int prot);
+int ipa3_ap_suspend(struct device *dev);
+int ipa3_ap_resume(struct device *dev);
+int ipa3_init_interrupts(void);
+struct iommu_domain *ipa3_get_smmu_domain(void);
+int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
+void ipa3_set_resorce_groups_min_max_limits(void);
+void ipa3_suspend_apps_pipes(bool suspend);
+void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
+			uint32_t qmap_id);
+int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
+	enum ipa_ip_type ip_type,
+	bool hashable,
+	struct ipahal_flt_rule_entry entry[],
+	int *num_entry);
+int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
+	enum ipa_ip_type ip_type,
+	bool hashable,
+	struct ipahal_rt_rule_entry entry[],
+	int *num_entry);
+int ipa3_restore_suspend_handler(void);
+int ipa3_inject_dma_task_for_gsi(void);
+int ipa3_uc_panic_notifier(struct notifier_block *this,
+	unsigned long event, void *ptr);
+void ipa3_inc_acquire_wakelock(void);
+void ipa3_dec_release_wakelock(void);
+int ipa3_load_fws(const struct firmware *firmware);
+int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
+int ipa_gsi_ch20_wa(void);
+int ipa3_rx_poll(u32 clnt_hdl, int budget);
+void ipa3_recycle_wan_skb(struct sk_buff *skb);
+int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map);
+int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr,
+	u32 size, bool map);
+int ipa3_ntn_init(void);
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
+struct dentry *ipa_debugfs_get_root(void);
+bool ipa3_is_msm_device(void);
+#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
new file mode 100644
index 0000000..75711c0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -0,0 +1,567 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include "ipa_i.h"
+
+#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq"
+#define DIS_SUSPEND_INTERRUPT_TIMEOUT 5
+#define IPA_IRQ_NUM_MAX 32
+
+struct ipa3_interrupt_info {
+	ipa_irq_handler_t handler;
+	enum ipa_irq_type interrupt;
+	void *private_data;
+	bool deferred_flag;
+};
+
+struct ipa3_interrupt_work_wrap {
+	struct work_struct interrupt_work;
+	ipa_irq_handler_t handler;
+	enum ipa_irq_type interrupt;
+	void *private_data;
+	void *interrupt_data;
+};
+
+static struct ipa3_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX];
+static struct workqueue_struct *ipa_interrupt_wq;
+static u32 ipa_ee;
+
+static void ipa3_tx_suspend_interrupt_wa(void);
+static void ipa3_enable_tx_suspend_wa(struct work_struct *work);
+static DECLARE_DELAYED_WORK(dwork_en_suspend_int,
+						ipa3_enable_tx_suspend_wa);
+static spinlock_t suspend_wa_lock;
+static void ipa3_process_interrupts(bool isr_context);
+
+static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
+	[IPA_UC_TX_CMD_Q_NOT_FULL_IRQ]		= -1,
+	[IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ]	= -1,
+	[IPA_BAD_SNOC_ACCESS_IRQ]		= 0,
+	[IPA_EOT_COAL_IRQ]			= -1,
+	[IPA_UC_IRQ_0]				= 2,
+	[IPA_UC_IRQ_1]				= 3,
+	[IPA_UC_IRQ_2]				= 4,
+	[IPA_UC_IRQ_3]				= 5,
+	[IPA_UC_IN_Q_NOT_EMPTY_IRQ]		= 6,
+	[IPA_UC_RX_CMD_Q_NOT_FULL_IRQ]		= 7,
+	[IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ]	= 8,
+	[IPA_RX_ERR_IRQ]			= 9,
+	[IPA_DEAGGR_ERR_IRQ]			= 10,
+	[IPA_TX_ERR_IRQ]			= 11,
+	[IPA_STEP_MODE_IRQ]			= 12,
+	[IPA_PROC_ERR_IRQ]			= 13,
+	[IPA_TX_SUSPEND_IRQ]			= 14,
+	[IPA_TX_HOLB_DROP_IRQ]			= 15,
+	[IPA_BAM_GSI_IDLE_IRQ]			= 16,
+};
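+
+/*
+ * Note (added for clarity, not in the original patch): the table above maps
+ * a logical ipa_irq_type to the HW interrupt number used with the IPA IRQ
+ * registers; -1 marks types with no HW interrupt on this core. A lookup
+ * therefore looks like (sketch, mirroring the usage further down):
+ *
+ *	int irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+ *
+ *	if (irq_num == -1)
+ *		return;
+ */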
+
+static void ipa3_interrupt_defer(struct work_struct *work);
+static DECLARE_WORK(ipa3_interrupt_defer_work, ipa3_interrupt_defer);
+
+static void ipa3_deferred_interrupt_work(struct work_struct *work)
+{
+	struct ipa3_interrupt_work_wrap *work_data =
+			container_of(work,
+			struct ipa3_interrupt_work_wrap,
+			interrupt_work);
+	IPADBG("call handler from workq...\n");
+	work_data->handler(work_data->interrupt, work_data->private_data,
+			work_data->interrupt_data);
+	kfree(work_data->interrupt_data);
+	kfree(work_data);
+}
+
+static bool ipa3_is_valid_ep(u32 ep_suspend_data)
+{
+	u32 bmsk = 1;
+	u32 i = 0;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if ((ep_suspend_data & bmsk) && (ipa3_ctx->ep[i].valid))
+			return true;
+		bmsk = bmsk << 1;
+	}
+	return false;
+}
+
+static int ipa3_handle_interrupt(int irq_num, bool isr_context)
+{
+	struct ipa3_interrupt_info interrupt_info;
+	struct ipa3_interrupt_work_wrap *work_data;
+	u32 suspend_data;
+	void *interrupt_data = NULL;
+	struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL;
+	int res;
+
+	interrupt_info = ipa_interrupt_to_cb[irq_num];
+	if (interrupt_info.handler == NULL) {
+		IPAERR("A callback function wasn't set for interrupt num %d\n",
+			irq_num);
+		return -EINVAL;
+	}
+
+	switch (interrupt_info.interrupt) {
+	case IPA_TX_SUSPEND_IRQ:
+		IPADBG_LOW("processing TX_SUSPEND interrupt work-around\n");
+		ipa3_tx_suspend_interrupt_wa();
+		suspend_data = ipahal_read_reg_n(IPA_IRQ_SUSPEND_INFO_EE_n,
+			ipa_ee);
+		IPADBG_LOW("get interrupt %d\n", suspend_data);
+
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+			/* Clearing L2 interrupts status */
+			ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n,
+				ipa_ee, suspend_data);
+		}
+		if (!ipa3_is_valid_ep(suspend_data))
+			return 0;
+
+		suspend_interrupt_data =
+			kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC);
+		if (!suspend_interrupt_data) {
+			IPAERR("failed allocating suspend_interrupt_data\n");
+			return -ENOMEM;
+		}
+		suspend_interrupt_data->endpoints = suspend_data;
+		interrupt_data = suspend_interrupt_data;
+		break;
+	case IPA_UC_IRQ_0:
+		if (ipa3_ctx->apply_rg10_wa) {
+			/*
+			 * Early detection of a uC crash. If the RG10
+			 * workaround is enabled, a uC crash would otherwise
+			 * go undetected: before the uC event is processed,
+			 * the interrupt is cleared using a uC register write,
+			 * which times out because the uC has already crashed.
+			 */
+			if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+			    IPA_HW_2_CPU_EVENT_ERROR)
+				ipa3_ctx->uc_ctx.uc_failed = true;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Force defer processing if in ISR context. */
+	if (interrupt_info.deferred_flag || isr_context) {
+		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
+				GFP_ATOMIC);
+		if (!work_data) {
+			IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
+			res = -ENOMEM;
+			goto fail_alloc_work;
+		}
+		INIT_WORK(&work_data->interrupt_work,
+				ipa3_deferred_interrupt_work);
+		work_data->handler = interrupt_info.handler;
+		work_data->interrupt = interrupt_info.interrupt;
+		work_data->private_data = interrupt_info.private_data;
+		work_data->interrupt_data = interrupt_data;
+		queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+
+	} else {
+		interrupt_info.handler(interrupt_info.interrupt,
+			interrupt_info.private_data,
+			interrupt_data);
+		kfree(interrupt_data);
+	}
+
+	return 0;
+
+fail_alloc_work:
+	kfree(interrupt_data);
+	return res;
+}
+
+static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
+{
+	u32 en;
+	u32 suspend_bmask;
+	int irq_num;
+
+	IPADBG_LOW("Enter\n");
+
+	irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+	BUG_ON(irq_num == -1);
+
+	/* make sure ipa hw is clocked on*/
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	suspend_bmask = 1 << irq_num;
+	/*enable  TX_SUSPEND_IRQ*/
+	en |= suspend_bmask;
+	IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n"
+		, en);
+	ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, en);
+	ipa3_process_interrupts(false);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG_LOW("Exit\n");
+}
+
+static void ipa3_tx_suspend_interrupt_wa(void)
+{
+	u32 val;
+	u32 suspend_bmask;
+	int irq_num;
+
+	IPADBG_LOW("Enter\n");
+	irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+	BUG_ON(irq_num == -1);
+
+	/*disable TX_SUSPEND_IRQ*/
+	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	suspend_bmask = 1 << irq_num;
+	val &= ~suspend_bmask;
+	IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n",
+		val);
+	ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+
+	IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n");
+	queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int,
+			msecs_to_jiffies(DIS_SUSPEND_INTERRUPT_TIMEOUT));
+
+	IPADBG_LOW("Exit\n");
+}
+
+static inline bool is_uc_irq(int irq_num)
+{
+	if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 &&
+		ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3)
+		return true;
+	else
+		return false;
+}
+
+static void ipa3_process_interrupts(bool isr_context)
+{
+	u32 reg;
+	u32 bmsk;
+	u32 i = 0;
+	u32 en;
+	unsigned long flags;
+	bool uc_irq;
+
+	IPADBG_LOW("Enter\n");
+
+	spin_lock_irqsave(&suspend_wa_lock, flags);
+	en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
+	while (en & reg) {
+		bmsk = 1;
+		for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
+			if (en & reg & bmsk) {
+				uc_irq = is_uc_irq(i);
+
+				/*
+				 * Clear uC interrupt before processing to avoid
+				 * clearing unhandled interrupts
+				 */
+				if (uc_irq)
+					ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n,
+							ipa_ee, bmsk);
+
+				/*
+				 * handle the interrupt with spin_lock
+				 * unlocked to avoid calling client in atomic
+				 * context. mutual exclusion still preserved
+				 * as the read/clr is done with spin_lock
+				 * locked.
+				 */
+				spin_unlock_irqrestore(&suspend_wa_lock, flags);
+				ipa3_handle_interrupt(i, isr_context);
+				spin_lock_irqsave(&suspend_wa_lock, flags);
+
+				/*
+				 * Clear non uC interrupt after processing
+				 * to avoid clearing interrupt data
+				 */
+				if (!uc_irq)
+					ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n,
+							ipa_ee, bmsk);
+			}
+			bmsk = bmsk << 1;
+		}
+		/*
+		 * If the uC failed, the interrupt cannot be cleared.
+		 * The device will crash as part of handling the uC event.
+		 */
+		if (ipa3_ctx->apply_rg10_wa && ipa3_ctx->uc_ctx.uc_failed)
+			break;
+
+		reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
+		/*
+		 * Due to the suspend interrupt HW bug we must re-read the EN
+		 * register, otherwise the while loop never terminates.
+		 */
+		en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	}
+
+	spin_unlock_irqrestore(&suspend_wa_lock, flags);
+	IPADBG_LOW("Exit\n");
+}
+
+static void ipa3_interrupt_defer(struct work_struct *work)
+{
+	IPADBG("processing interrupts in wq\n");
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa3_process_interrupts(false);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("Done\n");
+}
+
+static irqreturn_t ipa3_isr(int irq, void *ctxt)
+{
+	unsigned long flags;
+
+	IPADBG_LOW("Enter\n");
+	/* defer interrupt handling in case IPA is not clocked on */
+	if (ipa3_active_clients_trylock(&flags) == 0) {
+		IPADBG("defer interrupt processing\n");
+		queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
+		return IRQ_HANDLED;
+	}
+
+	if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
+		IPADBG("defer interrupt processing\n");
+		queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
+		goto bail;
+	}
+
+	ipa3_process_interrupts(true);
+	IPADBG_LOW("Exit\n");
+
+bail:
+	ipa3_active_clients_trylock_unlock(&flags);
+	return IRQ_HANDLED;
+}
+/**
+* ipa3_add_interrupt_handler() - Adds a handler for an interrupt type
+* @interrupt:		Interrupt type
+* @handler:		The handler to be added
+* @deferred_flag:	whether the handler processing should be deferred to
+*			a workqueue
+* @private_data:	the client's private data
+*
+* Adds a handler for an interrupt type and enables the specific bit in the
+* IRQ_EN register; the associated interrupt in the IRQ_STTS register will be
+* enabled
+*/
+int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data)
+{
+	u32 val;
+	u32 bmsk;
+	int irq_num;
+	int client_idx, ep_idx;
+
+	IPADBG("in ipa3_add_interrupt_handler interrupt_enum(%d)\n", interrupt);
+	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+		interrupt >= IPA_IRQ_MAX) {
+		IPAERR("invalid interrupt number %d\n", interrupt);
+		return -EINVAL;
+	}
+
+	irq_num = ipa3_irq_mapping[interrupt];
+	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+		IPAERR("interrupt %d not supported\n", interrupt);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+	IPADBG("ipa_interrupt_to_cb irq_num(%d)\n", irq_num);
+
+	ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
+	ipa_interrupt_to_cb[irq_num].handler = handler;
+	ipa_interrupt_to_cb[irq_num].private_data = private_data;
+	ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
+
+	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val);
+	bmsk = 1 << irq_num;
+	val |= bmsk;
+	ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+	IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val);
+
+	/* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt*/
+	if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
+		(ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
+		val = ~0;
+		for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+			if (IPA_CLIENT_IS_Q6_CONS(client_idx) ||
+				IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+				ep_idx = ipa3_get_ep_mapping(client_idx);
+				IPADBG("modem ep_idx(%d) client_idx = %d\n",
+					ep_idx, client_idx);
+				if (ep_idx == -1)
+					IPADBG("Invalid IPA client\n");
+				else
+					val &= ~(1 << ep_idx);
+			}
+
+		ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val);
+		IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val);
+	}
+	return 0;
+}
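+
+/*
+ * Illustrative usage sketch (not part of this driver): a client would
+ * typically register a deferred handler for the TX_SUSPEND interrupt roughly
+ * as below. The handler signature follows the ipa_irq_handler_t call site
+ * above; my_suspend_handler and my_ctx are hypothetical names.
+ *
+ * static void my_suspend_handler(enum ipa_irq_type interrupt,
+ *	void *private_data, void *interrupt_data)
+ * {
+ *	struct ipa_tx_suspend_irq_data *data = interrupt_data;
+ *
+ *	pr_debug("suspended endpoints bitmask: 0x%x\n", data->endpoints);
+ * }
+ *
+ * ret = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, my_suspend_handler,
+ *	true, my_ctx);
+ */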
+
+/**
+* ipa3_remove_interrupt_handler() - Removes the handler for an interrupt type
+* @interrupt:		Interrupt type
+*
+* Removes the handler and disables the specific bit in the IRQ_EN register
+*/
+int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+	u32 val;
+	u32 bmsk;
+	int irq_num;
+
+	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+		interrupt >= IPA_IRQ_MAX) {
+		IPAERR("invalid interrupt number %d\n", interrupt);
+		return -EINVAL;
+	}
+
+	irq_num = ipa3_irq_mapping[interrupt];
+	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+		IPAERR("interrupt %d not supported\n", interrupt);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	kfree(ipa_interrupt_to_cb[irq_num].private_data);
+	ipa_interrupt_to_cb[irq_num].deferred_flag = false;
+	ipa_interrupt_to_cb[irq_num].handler = NULL;
+	ipa_interrupt_to_cb[irq_num].private_data = NULL;
+	ipa_interrupt_to_cb[irq_num].interrupt = -1;
+
+	/* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
+	if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
+		(ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
+		ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
+		IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
+	}
+
+	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	bmsk = 1 << irq_num;
+	val &= ~bmsk;
+	ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+
+	return 0;
+}
+
+/**
+* ipa3_interrupts_init() - Initialize the IPA interrupts framework
+* @ipa_irq:	The interrupt number to allocate
+* @ee:		Execution environment
+* @ipa_dev:	The basic device structure representing the IPA driver
+*
+* - Initialize the ipa_interrupt_to_cb array
+* - Clear interrupts status
+* - Register the ipa interrupt handler - ipa3_isr
+* - Enable apps processor wakeup by IPA interrupts
+*/
+int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
+{
+	int idx;
+	int res = 0;
+
+	ipa_ee = ee;
+	for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) {
+		ipa_interrupt_to_cb[idx].deferred_flag = false;
+		ipa_interrupt_to_cb[idx].handler = NULL;
+		ipa_interrupt_to_cb[idx].private_data = NULL;
+		ipa_interrupt_to_cb[idx].interrupt = -1;
+	}
+
+	ipa_interrupt_wq = create_singlethread_workqueue(
+			INTERRUPT_WORKQUEUE_NAME);
+	if (!ipa_interrupt_wq) {
+		IPAERR("workqueue creation failed\n");
+		return -ENOMEM;
+	}
+
+	res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
+				IRQF_TRIGGER_RISING, "ipa", ipa_dev);
+	if (res) {
+		IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq);
+		return -ENODEV;
+	}
+	IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
+
+	res = enable_irq_wake(ipa_irq);
+	if (res)
+		IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
+				ipa_irq, res);
+	else
+		IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
+
+	spin_lock_init(&suspend_wa_lock);
+	return 0;
+}
+
+/**
+* ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ
+* @clnt_hdl:		suspended client handle, IRQ is emulated for this pipe
+*
+* Emulate a suspend IRQ to unsuspend a client that was suspended with an open
+* aggregation frame, in order to bypass the HW bug where no IRQ is generated
+* when an endpoint is suspended during an open aggregation.
+*/
+void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
+{
+	struct ipa3_interrupt_info interrupt_info;
+	struct ipa3_interrupt_work_wrap *work_data;
+	struct ipa_tx_suspend_irq_data *suspend_interrupt_data;
+	int irq_num;
+	int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+
+	if (aggr_active_bitmap & (1 << clnt_hdl)) {
+		/* force close aggregation */
+		ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
+
+		/* simulate suspend IRQ */
+		irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+		interrupt_info = ipa_interrupt_to_cb[irq_num];
+		if (interrupt_info.handler == NULL) {
+			IPAERR("no CB function for IPA_TX_SUSPEND_IRQ!\n");
+			return;
+		}
+		suspend_interrupt_data = kzalloc(
+				sizeof(*suspend_interrupt_data),
+				GFP_ATOMIC);
+		if (!suspend_interrupt_data) {
+			IPAERR("failed allocating suspend_interrupt_data\n");
+			return;
+		}
+		suspend_interrupt_data->endpoints = 1 << clnt_hdl;
+
+		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
+				GFP_ATOMIC);
+		if (!work_data) {
+			IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
+			goto fail_alloc_work;
+		}
+		INIT_WORK(&work_data->interrupt_work,
+				ipa3_deferred_interrupt_work);
+		work_data->handler = interrupt_info.handler;
+		work_data->interrupt = IPA_TX_SUSPEND_IRQ;
+		work_data->private_data = interrupt_info.private_data;
+		work_data->interrupt_data = (void *)suspend_interrupt_data;
+		queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+		return;
+fail_alloc_work:
+		kfree(suspend_interrupt_data);
+	}
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
new file mode 100644
index 0000000..32c5004
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -0,0 +1,615 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include "ipa_i.h"
+
+struct ipa3_intf {
+	char name[IPA_RESOURCE_NAME_MAX];
+	struct list_head link;
+	u32 num_tx_props;
+	u32 num_rx_props;
+	u32 num_ext_props;
+	struct ipa_ioc_tx_intf_prop *tx;
+	struct ipa_ioc_rx_intf_prop *rx;
+	struct ipa_ioc_ext_intf_prop *ext;
+	enum ipa_client_type excp_pipe;
+};
+
+struct ipa3_push_msg {
+	struct ipa_msg_meta meta;
+	ipa_msg_free_fn callback;
+	void *buff;
+	struct list_head link;
+};
+
+struct ipa3_pull_msg {
+	struct ipa_msg_meta meta;
+	ipa_msg_pull_fn callback;
+	struct list_head link;
+};
+
+/**
+ * ipa3_register_intf() - register "logical" interface
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ *
+ * Register an interface and its tx and rx properties; this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx)
+{
+	return ipa3_register_intf_ext(name, tx, rx, NULL);
+}
+
+/**
+ * ipa3_register_intf_ext() - register "logical" interface which has only
+ * extended properties
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ * @ext: [in] EXT properties of the interface
+ *
+ * Register an interface and its tx, rx and ext properties; this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx,
+		       const struct ipa_ext_intf *ext)
+{
+	struct ipa3_intf *intf;
+	u32 len;
+
+	if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) {
+		IPAERR("invalid params name=%p tx=%p rx=%p ext=%p\n", name,
+				tx, rx, ext);
+		return -EINVAL;
+	}
+
+	if (tx && tx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid tx num_props=%d max=%d\n", tx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	if (rx && rx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid rx num_props=%d max=%d\n", rx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	if (ext && ext->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid ext num_props=%d max=%d\n", ext->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	len = sizeof(struct ipa3_intf);
+	intf = kzalloc(len, GFP_KERNEL);
+	if (intf == NULL) {
+		IPAERR("fail to alloc 0x%x bytes\n", len);
+		return -ENOMEM;
+	}
+
+	strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX);
+
+	if (tx) {
+		intf->num_tx_props = tx->num_props;
+		len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop);
+		intf->tx = kzalloc(len, GFP_KERNEL);
+		if (intf->tx == NULL) {
+			IPAERR("fail to alloc 0x%x bytes\n", len);
+			kfree(intf);
+			return -ENOMEM;
+		}
+		memcpy(intf->tx, tx->prop, len);
+	}
+
+	if (rx) {
+		intf->num_rx_props = rx->num_props;
+		len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop);
+		intf->rx = kzalloc(len, GFP_KERNEL);
+		if (intf->rx == NULL) {
+			IPAERR("fail to alloc 0x%x bytes\n", len);
+			kfree(intf->tx);
+			kfree(intf);
+			return -ENOMEM;
+		}
+		memcpy(intf->rx, rx->prop, len);
+	}
+
+	if (ext) {
+		intf->num_ext_props = ext->num_props;
+		len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop);
+		intf->ext = kzalloc(len, GFP_KERNEL);
+		if (intf->ext == NULL) {
+			IPAERR("fail to alloc 0x%x bytes\n", len);
+			kfree(intf->rx);
+			kfree(intf->tx);
+			kfree(intf);
+			return -ENOMEM;
+		}
+		memcpy(intf->ext, ext->prop, len);
+	}
+
+	if (ext && ext->excp_pipe_valid)
+		intf->excp_pipe = ext->excp_pipe;
+	else
+		intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS;
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_add_tail(&intf->link, &ipa3_ctx->intf_list);
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
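+
+/*
+ * Illustrative usage sketch (hypothetical, not taken from a real client): a
+ * network driver registers its logical interface with one tx and one rx
+ * property; the property values below are placeholders.
+ *
+ * struct ipa_ioc_tx_intf_prop tx_prop = { .ip = IPA_IP_v4 };
+ * struct ipa_ioc_rx_intf_prop rx_prop = { .ip = IPA_IP_v4 };
+ * struct ipa_tx_intf tx = { .num_props = 1, .prop = &tx_prop };
+ * struct ipa_rx_intf rx = { .num_props = 1, .prop = &rx_prop };
+ *
+ * if (ipa3_register_intf("rmnet_data0", &tx, &rx))
+ *	pr_err("failed to register IPA interface\n");
+ */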
+
+/**
+ * ipa3_deregister_intf() - de-register previously registered logical interface
+ * @name: [in] interface name
+ *
+ * De-register a previously registered interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_deregister_intf(const char *name)
+{
+	struct ipa3_intf *entry;
+	struct ipa3_intf *next;
+	int result = -EINVAL;
+
+	if ((name == NULL) ||
+	    (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX)) {
+		IPAERR("invalid param name=%s\n", name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry_safe(entry, next, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, name)) {
+			list_del(&entry->link);
+			kfree(entry->ext);
+			kfree(entry->rx);
+			kfree(entry->tx);
+			kfree(entry);
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf() - query logical interface properties
+ * @lookup:	[inout] interface name and number of properties
+ *
+ * Obtain the handle and number of tx and rx properties for the named
+ * interface, used as part of querying the tx and rx properties for
+ * configuration of various rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf(struct ipa_ioc_query_intf *lookup)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (lookup == NULL) {
+		IPAERR("invalid param lookup=%p\n", lookup);
+		return result;
+	}
+
+	if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
+			IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Interface name too long. (%s)\n", lookup->name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, lookup->name)) {
+			lookup->num_tx_props = entry->num_tx_props;
+			lookup->num_rx_props = entry->num_rx_props;
+			lookup->num_ext_props = entry->num_ext_props;
+			lookup->excp_pipe = entry->excp_pipe;
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf_tx_props() - query TX props of an interface
+ * @tx:  [inout] interface tx attributes
+ *
+ * Obtain the tx properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (tx == NULL) {
+		IPAERR("invalid param tx=%p\n", tx);
+		return result;
+	}
+
+	if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Interface name too long. (%s)\n", tx->name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, tx->name)) {
+			memcpy(tx->tx, entry->tx, entry->num_tx_props *
+			       sizeof(struct ipa_ioc_tx_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf_rx_props() - query RX props of an interface
+ * @rx:  [inout] interface rx attributes
+ *
+ * Obtain the rx properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (rx == NULL) {
+		IPAERR("invalid param rx=%p\n", rx);
+		return result;
+	}
+
+	if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Interface name too long. (%s)\n", rx->name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, rx->name)) {
+			memcpy(rx->rx, entry->rx, entry->num_rx_props *
+					sizeof(struct ipa_ioc_rx_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf_ext_props() - query EXT props of an interface
+ * @ext:  [inout] interface ext attributes
+ *
+ * Obtain the ext properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (ext == NULL) {
+		IPAERR("invalid param ext=%p\n", ext);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, ext->name)) {
+			memcpy(ext->ext, entry->ext, entry->num_ext_props *
+					sizeof(struct ipa_ioc_ext_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * The client supplies the message meta-data and payload, which the IPA driver
+ * buffers until it is read by user-space. After it is read from user-space,
+ * the IPA driver invokes the supplied callback to free the message payload.
+ * The client must not touch/free the message payload after calling this API.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback)
+{
+	struct ipa3_push_msg *msg;
+
+	if (meta == NULL || (buff == NULL && callback != NULL) ||
+	    (buff != NULL && callback == NULL)) {
+		IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+		       meta, buff, callback);
+		return -EINVAL;
+	}
+
+	if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
+		IPAERR("unsupported message type %d\n", meta->msg_type);
+		return -EINVAL;
+	}
+
+	msg = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		IPAERR("fail to alloc ipa_msg container\n");
+		return -ENOMEM;
+	}
+
+	msg->meta = *meta;
+	msg->buff = buff;
+	msg->callback = callback;
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_add_tail(&msg->link, &ipa3_ctx->msg_list);
+	mutex_unlock(&ipa3_ctx->msg_lock);
+	IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]);
+
+	wake_up(&ipa3_ctx->msg_waitq);
+
+	return 0;
+}
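+
+/*
+ * Illustrative usage sketch (hypothetical): a kernel client hands ownership
+ * of the payload to the driver and frees it from the callback once it has
+ * been read by user-space. The callback signature is inferred from the call
+ * site in ipa3_read(); my_free_cb, my_event and MY_EVENT_TYPE are
+ * placeholder names.
+ *
+ * static void my_free_cb(void *buff, u32 len, u32 type)
+ * {
+ *	kfree(buff);
+ * }
+ *
+ * struct ipa_msg_meta meta = { 0 };
+ * struct my_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+ *
+ * meta.msg_type = MY_EVENT_TYPE;
+ * meta.msg_len = sizeof(*evt);
+ * if (ipa3_send_msg(&meta, evt, my_free_cb))
+ *	kfree(evt);
+ */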
+
+/**
+ * ipa3_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register message callback by kernel client with IPA driver for IPA driver to
+ * pull message on-demand.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
+{
+	struct ipa3_pull_msg *msg;
+
+	if (meta == NULL || callback == NULL) {
+		IPAERR("invalid param meta=%p callback=%p\n", meta, callback);
+		return -EINVAL;
+	}
+
+	msg = kzalloc(sizeof(struct ipa3_pull_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		IPAERR("fail to alloc ipa_msg container\n");
+		return -ENOMEM;
+	}
+
+	msg->meta = *meta;
+	msg->callback = callback;
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_add_tail(&msg->link, &ipa3_ctx->pull_msg_list);
+	mutex_unlock(&ipa3_ctx->msg_lock);
+
+	return 0;
+}
+
+/**
+ * ipa3_deregister_pull_msg() - De-register pull message type
+ * @meta: [in] message meta-data
+ *
+ * De-register "message" by kernel client from IPA driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+	struct ipa3_pull_msg *entry;
+	struct ipa3_pull_msg *next;
+	int result = -EINVAL;
+
+	if (meta == NULL) {
+		IPAERR("invalid param name=%p\n", meta);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_for_each_entry_safe(entry, next, &ipa3_ctx->pull_msg_list, link) {
+		if (entry->meta.msg_len == meta->msg_len &&
+		    entry->meta.msg_type == meta->msg_type) {
+			list_del(&entry->link);
+			kfree(entry);
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->msg_lock);
+	return result;
+}
+
+/**
+ * ipa3_read() - read message from IPA device
+ * @filp:	[in] file pointer
+ * @buf:	[out] buffer to read into
+ * @count:	[in] size of above buffer
+ * @f_pos:	[inout] file position
+ *
+ * User-space should continually read from /dev/ipa; reads will block when
+ * there are no messages to read. Upon return, user-space should read the
+ * ipa_msg_meta from the start of the buffer to know what type of message was
+ * read and its length in the remainder of the buffer. The buffer supplied
+ * must be big enough to hold the message meta-data and the largest defined
+ * message type
+ *
+ * Returns:	how many bytes copied to buffer
+ *
+ * Note:	Should not be called from atomic context
+ */
+ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
+		  loff_t *f_pos)
+{
+	char __user *start;
+	struct ipa3_push_msg *msg = NULL;
+	int ret;
+	DEFINE_WAIT(wait);
+	int locked;
+
+	start = buf;
+
+	while (1) {
+		prepare_to_wait(&ipa3_ctx->msg_waitq,
+				&wait,
+				TASK_INTERRUPTIBLE);
+
+		mutex_lock(&ipa3_ctx->msg_lock);
+		locked = 1;
+		if (!list_empty(&ipa3_ctx->msg_list)) {
+			msg = list_first_entry(&ipa3_ctx->msg_list,
+					struct ipa3_push_msg, link);
+			list_del(&msg->link);
+		}
+
+		IPADBG_LOW("msg=%p\n", msg);
+
+		if (msg) {
+			locked = 0;
+			mutex_unlock(&ipa3_ctx->msg_lock);
+			if (copy_to_user(buf, &msg->meta,
+					  sizeof(struct ipa_msg_meta))) {
+				ret = -EFAULT;
+				break;
+			}
+			buf += sizeof(struct ipa_msg_meta);
+			count -= sizeof(struct ipa_msg_meta);
+			if (msg->buff) {
+				if (copy_to_user(buf, msg->buff,
+						  msg->meta.msg_len)) {
+					ret = -EFAULT;
+					break;
+				}
+				buf += msg->meta.msg_len;
+				count -= msg->meta.msg_len;
+				msg->callback(msg->buff, msg->meta.msg_len,
+					       msg->meta.msg_type);
+			}
+			IPA_STATS_INC_CNT(
+				ipa3_ctx->stats.msg_r[msg->meta.msg_type]);
+			kfree(msg);
+		}
+
+		ret = -EAGAIN;
+		if (filp->f_flags & O_NONBLOCK)
+			break;
+
+		ret = -EINTR;
+		if (signal_pending(current))
+			break;
+
+		if (start != buf)
+			break;
+
+		locked = 0;
+		mutex_unlock(&ipa3_ctx->msg_lock);
+		schedule();
+	}
+
+	finish_wait(&ipa3_ctx->msg_waitq, &wait);
+	if (start != buf && ret != -EFAULT)
+		ret = buf - start;
+
+	if (locked)
+		mutex_unlock(&ipa3_ctx->msg_lock);
+
+	return ret;
+}
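+
+/*
+ * Illustrative user-space counterpart (a sketch, not part of the kernel
+ * driver): a daemon reading events from the character device. The buffer
+ * size and error handling are simplified assumptions, and process_event()
+ * is a hypothetical consumer.
+ *
+ * char buf[4096];
+ * int fd = open("/dev/ipa", O_RDONLY);
+ * ssize_t n = read(fd, buf, sizeof(buf));
+ *
+ * if (n >= (ssize_t)sizeof(struct ipa_msg_meta)) {
+ *	struct ipa_msg_meta *meta = (struct ipa_msg_meta *)buf;
+ *	process_event(meta, buf + sizeof(*meta), meta->msg_len);
+ * }
+ */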
+
+/**
+ * ipa3_pull_msg() - pull the specified message from client
+ * @meta: [in] message meta-data
+ * @buf:  [out] buffer to read into
+ * @count: [in] size of above buffer
+ *
+ * Populate the supplied buffer with the pull message, which is fetched
+ * from the client; the message must have previously been registered with
+ * the IPA driver
+ *
+ * Returns:	how many bytes copied to buffer
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
+{
+	struct ipa3_pull_msg *entry;
+	int result = -EINVAL;
+
+	if (meta == NULL || buff == NULL || !count) {
+		IPAERR("invalid param name=%p buff=%p count=%zu\n",
+				meta, buff, count);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_for_each_entry(entry, &ipa3_ctx->pull_msg_list, link) {
+		if (entry->meta.msg_len == meta->msg_len &&
+		    entry->meta.msg_type == meta->msg_type) {
+			result = entry->callback(buff, count, meta->msg_type);
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->msg_lock);
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
new file mode 100644
index 0000000..4ef1a96
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -0,0 +1,629 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/msm_gsi.h>
+#include <linux/ipa_mhi.h>
+#include "../ipa_common_i.h"
+#include "ipa_i.h"
+#include "ipa_qmi_service.h"
+
+#define IPA_MHI_DRV_NAME "ipa_mhi"
+
+
+#define IPA_MHI_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MHI_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MHI_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MHI_FUNC_ENTRY() \
+	IPA_MHI_DBG_LOW("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+	IPA_MHI_DBG_LOW("EXIT\n")
+
+#define IPA_MHI_MAX_UL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 1
+
+/* bit #40 in the address should be asserted for MHI transfers over PCIe */
+#define IPA_MHI_HOST_ADDR_COND(addr) \
+		((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
+
+enum ipa3_mhi_polling_mode {
+	IPA_MHI_POLLING_MODE_DB_MODE,
+	IPA_MHI_POLLING_MODE_POLL_MODE,
+};
+
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client)
+{
+	int res;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	IPA_MHI_FUNC_ENTRY();
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPA_MHI_ERR("Invalid client.\n");
+		return false;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl);
+	res = gsi_stop_channel(ep->gsi_chan_hdl);
+	if (res != 0 &&
+		res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPA_MHI_ERR("GSI stop channel failed %d\n",
+			res);
+		WARN_ON(1);
+		return false;
+	}
+
+	if (res == 0) {
+		IPA_MHI_DBG_LOW("GSI channel %ld STOP\n",
+			ep->gsi_chan_hdl);
+		return true;
+	}
+
+	return false;
+}
+
+static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client)
+{
+	int res;
+	int clnt_hdl;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	clnt_hdl = ipa3_get_ep_mapping(client);
+	if (clnt_hdl < 0)
+		return -EFAULT;
+
+	res = ipa3_reset_gsi_channel(clnt_hdl);
+	if (res) {
+		IPA_MHI_ERR("ipa3_reset_gsi_channel failed %d\n", res);
+		return -EFAULT;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa3_mhi_reset_gsi_channel(client);
+	if (res) {
+		IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
+		ipa_assert();
+		return res;
+	}
+
+	res = ipa3_disable_data_path(ipa3_get_ep_mapping(client));
+	if (res) {
+		IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
+		return res;
+	}
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa3_enable_data_path(ipa3_get_ep_mapping(client));
+	if (res) {
+		IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
+		return res;
+	}
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
+		struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size)
+{
+	switch (ch_ctx_host->pollcfg) {
+	case 0:
+		/* set default polling configuration according to MHI spec */
+		if (IPA_CLIENT_IS_PROD(client))
+			return 7;
+		else
+			return (ring_size / 2) / 8;
+	default:
+		return ch_ctx_host->pollcfg;
+	}
+}
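+
+/*
+ * Worked example for the default above: a consumer (DL) channel whose ring
+ * holds 128 elements and whose host context has pollcfg == 0 gets a polling
+ * configuration of (128 / 2) / 8 = 8, while producer (UL) channels simply
+ * default to 7.
+ */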
+
+static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
+	int ipa_ep_idx, struct start_gsi_channel *params)
+{
+	int res;
+	struct gsi_evt_ring_props ev_props;
+	struct ipa_mhi_msi_info *msi;
+	struct gsi_chan_props ch_props;
+	union __packed gsi_channel_scratch ch_scratch;
+	struct ipa3_ep_context *ep;
+	struct ipa_gsi_ep_config *ep_cfg;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	msi = params->msi;
+	ep_cfg = ipa_get_gsi_ep_info(ipa_ep_idx);
+	if (!ep_cfg) {
+		IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
+		return -EPERM;
+	}
+
+	/* allocate event ring only for the first time pipe is connected */
+	if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
+		memset(&ev_props, 0, sizeof(ev_props));
+		ev_props.intf = GSI_EVT_CHTYPE_MHI_EV;
+		ev_props.intr = GSI_INTR_MSI;
+		ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+		ev_props.ring_len = params->ev_ctx_host->rlen;
+		ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
+				params->ev_ctx_host->rbase);
+		ev_props.int_modt = params->ev_ctx_host->intmodt *
+				IPA_SLEEP_CLK_RATE_KHZ;
+		ev_props.int_modc = params->ev_ctx_host->intmodc;
+		ev_props.intvec = ((msi->data & ~msi->mask) |
+				(params->ev_ctx_host->msivec & msi->mask));
+		ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND(
+				(((u64)msi->addr_hi << 32) | msi->addr_low));
+		ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND(
+				params->event_context_addr +
+				offsetof(struct ipa_mhi_ev_ctx, rp));
+		ev_props.exclusive = true;
+		ev_props.err_cb = params->ev_err_cb;
+		ev_props.user_data = params->channel;
+		ev_props.evchid_valid = true;
+		ev_props.evchid = params->evchid;
+		IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n",
+			ipa_ep_idx, ev_props.evchid);
+		res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl,
+			&ep->gsi_evt_ring_hdl);
+		if (res) {
+			IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
+			goto fail_alloc_evt;
+		}
+		IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
+				client,
+				ep->gsi_evt_ring_hdl);
+		*params->cached_gsi_evt_ring_hdl =
+			ep->gsi_evt_ring_hdl;
+
+	} else {
+		IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n",
+			*params->cached_gsi_evt_ring_hdl);
+		ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
+	}
+
+	memset(&ch_props, 0, sizeof(ch_props));
+	ch_props.prot = GSI_CHAN_PROT_MHI;
+	ch_props.dir = IPA_CLIENT_IS_PROD(client) ?
+		GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
+	ch_props.ch_id = ep_cfg->ipa_gsi_chan_num;
+	ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
+	ch_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	ch_props.ring_len = params->ch_ctx_host->rlen;
+	ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
+			params->ch_ctx_host->rbase);
+	ch_props.use_db_eng = GSI_CHAN_DB_MODE;
+	ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	ch_props.low_weight = 1;
+	ch_props.err_cb = params->ch_err_cb;
+	ch_props.chan_user_data = params->channel;
+	res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (res) {
+		IPA_MHI_ERR("gsi_alloc_channel failed %d\n",
+			res);
+		goto fail_alloc_ch;
+	}
+
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND(
+			params->channel_context_addr +
+			offsetof(struct ipa_mhi_ch_ctx, wp));
+	ch_scratch.mhi.assert_bit40 = params->assert_bit40;
+	ch_scratch.mhi.max_outstanding_tre =
+		ep_cfg->ipa_if_tlv * ch_props.re_size;
+	ch_scratch.mhi.outstanding_threshold =
+		min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
+	ch_scratch.mhi.oob_mod_threshold = 4;
+	if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
+		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
+		ch_scratch.mhi.burst_mode_enabled = true;
+		ch_scratch.mhi.polling_configuration =
+			ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
+				(ch_props.ring_len / ch_props.re_size));
+		ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
+	} else {
+		ch_scratch.mhi.burst_mode_enabled = false;
+	}
+	res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+		ch_scratch);
+	if (res) {
+		IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n",
+			res);
+		goto fail_ch_scratch;
+	}
+
+	*params->mhi = ch_scratch.mhi;
+
+	IPA_MHI_DBG("Starting channel\n");
+	res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (res) {
+		IPA_MHI_ERR("gsi_start_channel failed %d\n", res);
+		goto fail_ch_start;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_ch_start:
+fail_ch_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+fail_alloc_ch:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	ep->gsi_evt_ring_hdl = ~0;
+fail_alloc_evt:
+	return res;
+}
+
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+	int res;
+	struct gsi_device_scratch gsi_scratch;
+	struct ipa_gsi_ep_config *gsi_ep_info;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!params) {
+		IPA_MHI_ERR("null args\n");
+		return -EINVAL;
+	}
+
+	/* Initialize IPA MHI engine */
+	gsi_ep_info = ipa_get_gsi_ep_info(
+		ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD));
+	if (!gsi_ep_info) {
+		IPAERR("MHI PROD has no ep allocated\n");
+		ipa_assert();
+	}
+	memset(&gsi_scratch, 0, sizeof(gsi_scratch));
+	gsi_scratch.mhi_base_chan_idx_valid = true;
+	gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num +
+		params->gsi.first_ch_idx;
+	res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
+		&gsi_scratch);
+	if (res) {
+		IPA_MHI_ERR("failed to write device scratch %d\n", res);
+		goto fail_init_engine;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_init_engine:
+	return res;
+}
+
+/**
+ * ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int res;
+	enum ipa_client_type client;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!in || !clnt_hdl) {
+		IPA_MHI_ERR("NULL args\n");
+		return -EINVAL;
+	}
+
+	client = in->sys->client;
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPA_MHI_ERR("Invalid client.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid == 1) {
+		IPA_MHI_ERR("EP already allocated.\n");
+		return -EPERM;
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+	ep->valid = 1;
+	ep->skip_ep_cfg = in->sys->skip_ep_cfg;
+	ep->client = client;
+	ep->client_notify = in->sys->notify;
+	ep->priv = in->sys->priv;
+	ep->keep_ipa_awake = in->sys->keep_ipa_awake;
+
+	res = ipa_mhi_start_gsi_channel(client,
+					ipa_ep_idx, &in->start.gsi);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
+			res);
+		goto fail_start_channel;
+	}
+
+	res = ipa3_enable_data_path(ipa_ep_idx);
+	if (res) {
+		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
+			ipa_ep_idx);
+		goto fail_ep_cfg;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_ep_cfg;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_ep_cfg;
+		}
+		IPA_MHI_DBG("ep configuration successful\n");
+	} else {
+		IPA_MHI_DBG("skipping ep configuration\n");
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
+		ipa_ep_idx);
+
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+
+fail_ep_cfg:
+	ipa3_disable_data_path(ipa_ep_idx);
+fail_start_channel:
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+	return -EPERM;
+}
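+
+/*
+ * Sketch of the call order implied by the comments above (hypothetical
+ * caller code; the *_params variables are placeholders): the MHI client
+ * driver initializes the engine once, then connects/disconnects pipes per
+ * channel.
+ *
+ * ipa3_mhi_init_engine(&init_params);
+ * ipa3_connect_mhi_pipe(&connect_params, &clnt_hdl);
+ * ...
+ * ipa3_disconnect_mhi_pipe(clnt_hdl);
+ */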
+
+/**
+ * ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel reset.
+ * This function is called after MHI channel was started.
+ * This function is doing the following:
+ *	- Send command to uC/GSI to reset corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("invalid handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("pipe was not connected %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		res = gsi_dealloc_channel(ep->gsi_chan_hdl);
+		if (res) {
+			IPAERR("gsi_dealloc_channel failed %d\n", res);
+			goto fail_reset_channel;
+		}
+	}
+
+	ep->valid = 0;
+	ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+	IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_reset_channel:
+	return res;
+}
+
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+	int res;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (brstmode_enabled && !LPTransitionRejected) {
+		/*
+		 * set polling mode bit to DB mode before
+		 * resuming the channel
+		 */
+		res = gsi_write_channel_scratch(
+			ep->gsi_chan_hdl, ch_scratch);
+		if (res) {
+			IPA_MHI_ERR("write ch scratch fail %d\n"
+				, res);
+			return res;
+		}
+	}
+
+	res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (res) {
+		IPA_MHI_ERR("failed to resume channel error %d\n", res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info)
+{
+	int ipa_ep_idx;
+	int res;
+	struct ipa3_ep_context *ep;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
+	if (res) {
+		IPAERR("gsi_query_channel_info failed\n");
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client)
+{
+	u32 aggr_state_active;
+	int ipa_ep_idx;
+
+	aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+	IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
+
+	ipa_ep_idx = ipa_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		ipa_assert();
+		return false;
+	}
+
+	if ((1 << ipa_ep_idx) & aggr_state_active)
+		return true;
+
+	return false;
+}
+
+int ipa3_mhi_destroy_channel(enum ipa_client_type client)
+{
+	int res;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
+		ep->gsi_evt_ring_hdl, ipa_ep_idx);
+
+	res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+	if (res) {
+		IPAERR(" failed to reset evt ring %lu, err %d\n"
+			, ep->gsi_evt_ring_hdl, res);
+		goto fail;
+	}
+
+	IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n",
+		ep->gsi_evt_ring_hdl, ipa_ep_idx);
+
+	res = gsi_dealloc_evt_ring(
+		ep->gsi_evt_ring_hdl);
+	if (res) {
+		IPAERR("dealloc evt ring %lu failed, err %d\n"
+			, ep->gsi_evt_ring_hdl, res);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return res;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
new file mode 100644
index 0000000..4b22203
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -0,0 +1,763 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET  0
+#define IPA_NAT_PHYS_MEM_SIZE  IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_TEMP_MEM_SIZE 128
+
+static int ipa3_nat_vma_fault_remap(
+	 struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	IPADBG("\n");
+	vmf->page = NULL;
+
+	return VM_FAULT_SIGBUS;
+}
+
+/* VMA related file operations functions */
+static struct vm_operations_struct ipa3_nat_remap_vm_ops = {
+	.fault = ipa3_nat_vma_fault_remap,
+};
+
+static int ipa3_nat_open(struct inode *inode, struct file *filp)
+{
+	struct ipa3_nat_mem *nat_ctx;
+
+	IPADBG("\n");
+	nat_ctx = container_of(inode->i_cdev, struct ipa3_nat_mem, cdev);
+	filp->private_data = nat_ctx;
+	IPADBG("return\n");
+
+	return 0;
+}
+
+static int ipa3_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long vsize = vma->vm_end - vma->vm_start;
+	struct ipa3_nat_mem *nat_ctx =
+		(struct ipa3_nat_mem *)filp->private_data;
+	unsigned long phys_addr;
+	int result;
+
+	mutex_lock(&nat_ctx->lock);
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("Mapping system memory\n");
+		if (nat_ctx->is_mapped) {
+			IPAERR("mapping already exists, only 1 supported\n");
+			result = -EINVAL;
+			goto bail;
+		}
+		IPADBG("map sz=0x%zx\n", nat_ctx->size);
+		result =
+			dma_mmap_coherent(
+				 ipa3_ctx->pdev, vma,
+				 nat_ctx->vaddr, nat_ctx->dma_handle,
+				 nat_ctx->size);
+
+		if (result) {
+			IPAERR("unable to map memory. Err:%d\n", result);
+			goto bail;
+		}
+		ipa3_ctx->nat_mem.nat_base_address = nat_ctx->vaddr;
+	} else {
+		IPADBG("Mapping shared(local) memory\n");
+		IPADBG("map sz=0x%lx\n", vsize);
+
+		if ((IPA_NAT_PHYS_MEM_SIZE == 0) ||
+				(vsize > IPA_NAT_PHYS_MEM_SIZE)) {
+			result = -EINVAL;
+			goto bail;
+		}
+		phys_addr = ipa3_ctx->ipa_wrapper_base +
+			ipa3_ctx->ctrl->ipa_reg_base_ofst +
+			ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+			IPA_NAT_PHYS_MEM_OFFSET);
+
+		if (remap_pfn_range(
+			 vma, vma->vm_start,
+			 phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+			IPAERR("remap failed\n");
+			result = -EAGAIN;
+			goto bail;
+		}
+		ipa3_ctx->nat_mem.nat_base_address = (void *)vma->vm_start;
+	}
+	nat_ctx->is_mapped = true;
+	vma->vm_ops = &ipa3_nat_remap_vm_ops;
+	IPADBG("return\n");
+	result = 0;
+bail:
+	mutex_unlock(&nat_ctx->lock);
+	return result;
+}
+
+static const struct file_operations ipa3_nat_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa3_nat_open,
+	.mmap = ipa3_nat_mmap
+};
+
+/**
+ * ipa3_allocate_temp_nat_memory() - Allocates temp nat memory
+ *
+ * Called during nat table delete
+ */
+void ipa3_allocate_temp_nat_memory(void)
+{
+	struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+	int gfp_flags = GFP_KERNEL | __GFP_ZERO;
+
+	nat_ctx->tmp_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE,
+				&nat_ctx->tmp_dma_handle, gfp_flags);
+
+	if (nat_ctx->tmp_vaddr == NULL) {
+		IPAERR("Temp Memory alloc failed\n");
+		nat_ctx->is_tmp_mem = false;
+		return;
+	}
+
+	nat_ctx->is_tmp_mem = true;
+	IPADBG("IPA NAT allocated temp memory successfully\n");
+}
+
+/**
+ * ipa3_create_nat_device() - Create the NAT device
+ *
+ * Called during ipa init to create nat device
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_create_nat_device(void)
+{
+	struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+	int result;
+
+	IPADBG("\n");
+
+	mutex_lock(&nat_ctx->lock);
+	nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME);
+	if (IS_ERR(nat_ctx->class)) {
+		IPAERR("unable to create the class\n");
+		result = -ENODEV;
+		goto vaddr_alloc_fail;
+	}
+	result = alloc_chrdev_region(&nat_ctx->dev_num,
+					0,
+					1,
+					NAT_DEV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto alloc_chrdev_region_fail;
+	}
+
+	nat_ctx->dev =
+	   device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+			"%s", NAT_DEV_NAME);
+
+	if (IS_ERR(nat_ctx->dev)) {
+		IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+		result = -ENODEV;
+		goto device_create_fail;
+	}
+
+	cdev_init(&nat_ctx->cdev, &ipa3_nat_fops);
+	nat_ctx->cdev.owner = THIS_MODULE;
+	nat_ctx->cdev.ops = &ipa3_nat_fops;
+
+	result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+	if (result) {
+		IPAERR("cdev_add err=%d\n", -result);
+		goto cdev_add_fail;
+	}
+	IPADBG("ipa nat dev added successfully. major:%d minor:%d\n",
+			MAJOR(nat_ctx->dev_num),
+			MINOR(nat_ctx->dev_num));
+
+	nat_ctx->is_dev = true;
+	ipa3_allocate_temp_nat_memory();
+	IPADBG("IPA NAT device created successfully\n");
+	result = 0;
+	goto bail;
+
+cdev_add_fail:
+	device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+	unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+	class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+	if (nat_ctx->vaddr) {
+		IPADBG("Releasing system memory\n");
+		dma_free_coherent(
+			 ipa3_ctx->pdev, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->vaddr = NULL;
+		nat_ctx->dma_handle = 0;
+		nat_ctx->size = 0;
+	}
+
+bail:
+	mutex_unlock(&nat_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_allocate_nat_device() - Allocates memory for the NAT device
+ * @mem:	[in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+	int gfp_flags = GFP_KERNEL | __GFP_ZERO;
+	int result;
+
+	IPADBG("passed memory size %zu\n", mem->size);
+
+	mutex_lock(&nat_ctx->lock);
+	if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
+		IPAERR("Nat device name mismatch\n");
+		IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (nat_ctx->is_dev != true) {
+		IPAERR("Nat device not created successfully during boot up\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (nat_ctx->is_dev_init == true) {
+		IPAERR("Device already init\n");
+		result = 0;
+		goto bail;
+	}
+
+	if (mem->size <= 0 ||
+			nat_ctx->is_dev_init == true) {
+		IPAERR("Invalid Parameters or device is already init\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+		IPADBG("Allocating system memory\n");
+		nat_ctx->is_sys_mem = true;
+		nat_ctx->vaddr =
+		   dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+				   &nat_ctx->dma_handle, gfp_flags);
+		if (nat_ctx->vaddr == NULL) {
+			IPAERR("memory alloc failed\n");
+			result = -ENOMEM;
+			goto bail;
+		}
+		nat_ctx->size = mem->size;
+	} else {
+		IPADBG("using shared(local) memory\n");
+		nat_ctx->is_sys_mem = false;
+	}
+
+	nat_ctx->is_dev_init = true;
+	IPADBG("IPA NAT dev init successfully\n");
+	result = 0;
+
+bail:
+	mutex_unlock(&nat_ctx->lock);
+
+	return result;
+}
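+
+/*
+ * Illustrative usage sketch (hypothetical): the NAT client fills in the
+ * allocation request before this function is reached (normally via the
+ * corresponding ioctl). The table size below is a placeholder.
+ *
+ * struct ipa_ioc_nat_alloc_mem mem;
+ *
+ * memset(&mem, 0, sizeof(mem));
+ * strlcpy(mem.dev_name, NAT_DEV_NAME, sizeof(mem.dev_name));
+ * mem.size = SZ_64K;
+ * if (ipa3_allocate_nat_device(&mem))
+ *	pr_err("NAT memory allocation failed\n");
+ */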
+
+/* IOCTL function handlers */
+/**
+ * ipa3_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+#define TBL_ENTRY_SIZE 32
+#define INDX_TBL_ENTRY_SIZE 4
+
+	struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+	struct ipa3_desc desc[2];
+	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	int result;
+	u32 offset = 0;
+	size_t tmp;
+
+	IPADBG("\n");
+	if (init->table_entries == 0) {
+		IPADBG("Table entries is zero\n");
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->ipv4_rules_offset >
+		UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Table Entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->ipv4_rules_offset +
+		(TBL_ENTRY_SIZE * (init->table_entries + 1));
+	if (tmp > ipa3_ctx->nat_mem.size) {
+		IPAERR("Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+			init->ipv4_rules_offset, (init->table_entries + 1),
+			tmp, ipa3_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->expn_rules_offset >
+		UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Expn Table Entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->expn_rules_offset +
+		(TBL_ENTRY_SIZE * init->expn_table_entries);
+	if (tmp > ipa3_ctx->nat_mem.size) {
+		IPAERR("Expn Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+			init->expn_rules_offset, init->expn_table_entries,
+			tmp, ipa3_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->index_offset >
+		UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Indx Table Entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->index_offset +
+		(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
+	if (tmp > ipa3_ctx->nat_mem.size) {
+		IPAERR("Indx Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+			init->index_offset, (init->table_entries + 1),
+			tmp, ipa3_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->index_expn_offset >
+		UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Expn Table entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->index_expn_offset +
+		(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
+	if (tmp > ipa3_ctx->nat_mem.size) {
+		IPAERR("Indx Expn Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+			init->index_expn_offset, init->expn_table_entries,
+			tmp, ipa3_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	memset(&desc, 0, sizeof(desc));
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	nop_cmd_pyld =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!nop_cmd_pyld) {
+		IPAERR("failed to construct NOP imm cmd\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
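+	/*
+	 * desc[0] carries the NO-OP register write constructed above, which
+	 * ensures the IPA pipeline is drained; desc[1] will carry the
+	 * IP_V4_NAT_INIT command itself.
+	 */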
+	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[0].callback = NULL;
+	desc[0].user1 = NULL;
+	desc[0].user2 = 0;
+	desc[0].pyld = nop_cmd_pyld->data;
+	desc[0].len = nop_cmd_pyld->len;
+
+	if (ipa3_ctx->nat_mem.vaddr) {
+		IPADBG("using system memory for nat table\n");
+		cmd.ipv4_rules_addr_shared = false;
+		cmd.ipv4_expansion_rules_addr_shared = false;
+		cmd.index_table_addr_shared = false;
+		cmd.index_table_expansion_addr_shared = false;
+
+		offset = UINT_MAX - ipa3_ctx->nat_mem.dma_handle;
+
+		if ((init->ipv4_rules_offset > offset) ||
+				(init->expn_rules_offset > offset) ||
+				(init->index_offset > offset) ||
+				(init->index_expn_offset > offset)) {
+			IPAERR("Failed due to integer overflow\n");
+			IPAERR("nat.mem.dma_handle: 0x%pa\n",
+				&ipa3_ctx->nat_mem.dma_handle);
+			IPAERR("ipv4_rules_offset: 0x%x\n",
+				init->ipv4_rules_offset);
+			IPAERR("expn_rules_offset: 0x%x\n",
+				init->expn_rules_offset);
+			IPAERR("index_offset: 0x%x\n",
+				init->index_offset);
+			IPAERR("index_expn_offset: 0x%x\n",
+				init->index_expn_offset);
+			result = -EPERM;
+			goto free_nop;
+		}
+		cmd.ipv4_rules_addr =
+			ipa3_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+		IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+		cmd.ipv4_expansion_rules_addr =
+		   ipa3_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+		IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+		cmd.index_table_addr =
+			ipa3_ctx->nat_mem.dma_handle + init->index_offset;
+		IPADBG("index_offset:0x%x\n", init->index_offset);
+
+		cmd.index_table_expansion_addr =
+		   ipa3_ctx->nat_mem.dma_handle + init->index_expn_offset;
+		IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+	} else {
+		IPADBG("using shared(local) memory for nat table\n");
+		cmd.ipv4_rules_addr_shared = true;
+		cmd.ipv4_expansion_rules_addr_shared = true;
+		cmd.index_table_addr_shared = true;
+		cmd.index_table_expansion_addr_shared = true;
+
+		cmd.ipv4_rules_addr = init->ipv4_rules_offset +
+				IPA_RAM_NAT_OFST;
+
+		cmd.ipv4_expansion_rules_addr = init->expn_rules_offset +
+				IPA_RAM_NAT_OFST;
+
+		cmd.index_table_addr = init->index_offset  +
+				IPA_RAM_NAT_OFST;
+
+		cmd.index_table_expansion_addr = init->index_expn_offset +
+				IPA_RAM_NAT_OFST;
+	}
+	cmd.table_index = init->tbl_index;
+	IPADBG("Table index:0x%x\n", cmd.table_index);
+	cmd.size_base_tables = init->table_entries;
+	IPADBG("Base Table size:0x%x\n", cmd.size_base_tables);
+	cmd.size_expansion_tables = init->expn_table_entries;
+	IPADBG("Expansion Table size:0x%x\n", cmd.size_expansion_tables);
+	cmd.public_ip_addr = init->ip_addr;
+	IPADBG("Public ip address:0x%x\n", cmd.public_ip_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+		result = -EPERM;
+		goto free_nop;
+	}
+
+	desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+	desc[1].type = IPA_IMM_CMD_DESC;
+	desc[1].callback = NULL;
+	desc[1].user1 = NULL;
+	desc[1].user2 = 0;
+	desc[1].pyld = cmd_pyld->data;
+	desc[1].len = cmd_pyld->len;
+	IPADBG("posting v4 init command\n");
+	if (ipa3_send_cmd(2, desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_ctx->nat_mem.public_ip_addr = init->ip_addr;
+	IPADBG("Table ip address:0x%x", ipa3_ctx->nat_mem.public_ip_addr);
+
+	ipa3_ctx->nat_mem.ipv4_rules_addr =
+	 (char *)ipa3_ctx->nat_mem.nat_base_address + init->ipv4_rules_offset;
+	IPADBG("ipv4_rules_addr: 0x%p\n",
+				 ipa3_ctx->nat_mem.ipv4_rules_addr);
+
+	ipa3_ctx->nat_mem.ipv4_expansion_rules_addr =
+	 (char *)ipa3_ctx->nat_mem.nat_base_address + init->expn_rules_offset;
+	IPADBG("ipv4_expansion_rules_addr: 0x%p\n",
+				 ipa3_ctx->nat_mem.ipv4_expansion_rules_addr);
+
+	ipa3_ctx->nat_mem.index_table_addr =
+		 (char *)ipa3_ctx->nat_mem.nat_base_address +
+		 init->index_offset;
+	IPADBG("index_table_addr: 0x%p\n",
+				 ipa3_ctx->nat_mem.index_table_addr);
+
+	ipa3_ctx->nat_mem.index_table_expansion_addr =
+	 (char *)ipa3_ctx->nat_mem.nat_base_address + init->index_expn_offset;
+	IPADBG("index_table_expansion_addr: 0x%p\n",
+				 ipa3_ctx->nat_mem.index_table_expansion_addr);
+
+	IPADBG("size_base_tables: %d\n", init->table_entries);
+	ipa3_ctx->nat_mem.size_base_tables  = init->table_entries;
+
+	IPADBG("size_expansion_tables: %d\n", init->expn_table_entries);
+	ipa3_ctx->nat_mem.size_expansion_tables = init->expn_table_entries;
+
+	IPADBG("return\n");
+	result = 0;
+destroy_imm_cmd:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_nop:
+	ipahal_destroy_imm_cmd(nop_cmd_pyld);
+bail:
+	return result;
+}
+
+/**
+ * ipa3_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma:	[in] NAT DMA command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+#define NUM_OF_DESC 2
+
+	struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+	struct ipahal_imm_cmd_nat_dma cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipa3_desc *desc = NULL;
+	u16 size = 0, cnt = 0;
+	int ret = 0;
+
+	IPADBG("\n");
+	if (dma->entries <= 0) {
+		IPAERR("Invalid number of commands %d\n",
+			dma->entries);
+		ret = -EPERM;
+		goto bail;
+	}
+
+	size = sizeof(struct ipa3_desc) * NUM_OF_DESC;
+	desc = kzalloc(size, GFP_KERNEL);
+	if (desc == NULL) {
+		IPAERR("Failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	nop_cmd_pyld =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!nop_cmd_pyld) {
+		IPAERR("Failed to construct NOP imm cmd\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].callback = NULL;
+	desc[0].user1 = NULL;
+	desc[0].user2 = 0;
+	desc[0].pyld = nop_cmd_pyld->data;
+	desc[0].len = nop_cmd_pyld->len;
+
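+	/*
+	 * Each DMA entry is posted as its own two-descriptor transaction:
+	 * desc[0] (the NO-OP above) is reused unchanged and desc[1] is
+	 * rebuilt with the NAT_DMA payload for the current entry.
+	 */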
+	for (cnt = 0; cnt < dma->entries; cnt++) {
+		cmd.table_index = dma->dma[cnt].table_index;
+		cmd.base_addr = dma->dma[cnt].base_addr;
+		cmd.offset = dma->dma[cnt].offset;
+		cmd.data = dma->dma[cnt].data;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_NAT_DMA, &cmd, false);
+		if (!cmd_pyld) {
+			IPAERR("Fail to construct nat_dma imm cmd\n");
+			continue;
+		}
+		desc[1].type = IPA_IMM_CMD_DESC;
+		desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_NAT_DMA);
+		desc[1].callback = NULL;
+		desc[1].user1 = NULL;
+		desc[1].user2 = 0;
+		desc[1].pyld = cmd_pyld->data;
+		desc[1].len = cmd_pyld->len;
+
+		ret = ipa3_send_cmd(NUM_OF_DESC, desc);
+		if (ret == -EPERM)
+			IPAERR("Fail to send immediate command %d\n", cnt);
+		ipahal_destroy_imm_cmd(cmd_pyld);
+	}
+
+bail:
+	kfree(desc);
+
+	if (nop_cmd_pyld != NULL)
+		ipahal_destroy_imm_cmd(nop_cmd_pyld);
+
+	return ret;
+}
+
+/**
+ * ipa3_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx:	[in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa3_nat_free_mem_and_device(struct ipa3_nat_mem *nat_ctx)
+{
+	IPADBG("\n");
+	mutex_lock(&nat_ctx->lock);
+
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("freeing the dma memory\n");
+		dma_free_coherent(
+			 ipa3_ctx->pdev, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->size = 0;
+		nat_ctx->vaddr = NULL;
+	}
+	nat_ctx->is_mapped = false;
+	nat_ctx->is_sys_mem = false;
+	nat_ctx->is_dev_init = false;
+
+	mutex_unlock(&nat_ctx->lock);
+	IPADBG("return\n");
+}
+
+/**
+ * ipa3_nat_del_cmd() - Delete a NAT table
+ * @del:	[in] NAT table deletion parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+	struct ipa3_desc desc[2];
+	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	bool mem_type_shared = true;
+	u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+	int result;
+
+	IPADBG("\n");
+	if (ipa3_ctx->nat_mem.is_tmp_mem) {
+		IPAERR("using temp memory during nat del\n");
+		mem_type_shared = false;
+		base_addr = ipa3_ctx->nat_mem.tmp_dma_handle;
+	}
+
+	if (del->public_ip_addr == 0) {
+		IPADBG("Bad Parameter\n");
+		result = -EPERM;
+		goto bail;
+	}
+
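+	/*
+	 * Deletion is performed by re-issuing IP_V4_NAT_INIT with zero-sized
+	 * tables, all pointing at a safe placeholder base address, before
+	 * the local bookkeeping and memory are released below.
+	 */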
+	memset(&desc, 0, sizeof(desc));
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	nop_cmd_pyld =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!nop_cmd_pyld) {
+		IPAERR("Failed to construct NOP imm cmd\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[0].callback = NULL;
+	desc[0].user1 = NULL;
+	desc[0].user2 = 0;
+	desc[0].pyld = nop_cmd_pyld->data;
+	desc[0].len = nop_cmd_pyld->len;
+
+	cmd.table_index = del->table_index;
+	cmd.ipv4_rules_addr = base_addr;
+	cmd.ipv4_rules_addr_shared = mem_type_shared;
+	cmd.ipv4_expansion_rules_addr = base_addr;
+	cmd.ipv4_expansion_rules_addr_shared = mem_type_shared;
+	cmd.index_table_addr = base_addr;
+	cmd.index_table_addr_shared = mem_type_shared;
+	cmd.index_table_expansion_addr = base_addr;
+	cmd.index_table_expansion_addr_shared = mem_type_shared;
+	cmd.size_base_tables = 0;
+	cmd.size_expansion_tables = 0;
+	cmd.public_ip_addr = 0;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+		result = -EPERM;
+		goto destroy_regwrt_imm_cmd;
+	}
+	desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+	desc[1].type = IPA_IMM_CMD_DESC;
+	desc[1].callback = NULL;
+	desc[1].user1 = NULL;
+	desc[1].user2 = 0;
+	desc[1].pyld = cmd_pyld->data;
+	desc[1].len = cmd_pyld->len;
+
+	if (ipa3_send_cmd(2, desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_ctx->nat_mem.size_base_tables = 0;
+	ipa3_ctx->nat_mem.size_expansion_tables = 0;
+	ipa3_ctx->nat_mem.public_ip_addr = 0;
+	ipa3_ctx->nat_mem.ipv4_rules_addr = 0;
+	ipa3_ctx->nat_mem.ipv4_expansion_rules_addr = 0;
+	ipa3_ctx->nat_mem.index_table_addr = 0;
+	ipa3_ctx->nat_mem.index_table_expansion_addr = 0;
+
+	ipa3_nat_free_mem_and_device(&ipa3_ctx->nat_mem);
+	IPADBG("return\n");
+	result = 0;
+
+destroy_imm_cmd:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_regwrt_imm_cmd:
+	ipahal_destroy_imm_cmd(nop_cmd_pyld);
+bail:
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
new file mode 100644
index 0000000..719eb2d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -0,0 +1,1268 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/qmi_encdec.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/ipa.h>
+#include <linux/vmalloc.h>
+
+#include "ipa_qmi_service.h"
+
+#define IPA_Q6_SVC_VERS 1
+#define IPA_A5_SVC_VERS 1
+#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ)
+
+#define IPA_A5_SERVICE_SVC_ID 0x31
+#define IPA_A5_SERVICE_INS_ID 1
+#define IPA_Q6_SERVICE_SVC_ID 0x31
+#define IPA_Q6_SERVICE_INS_ID 2
+
+#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
+#define QMI_SEND_REQ_TIMEOUT_MS 60000
+
+static struct qmi_handle *ipa3_svc_handle;
+static void ipa3_a5_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, ipa3_a5_svc_recv_msg);
+static struct workqueue_struct *ipa_svc_workqueue;
+static struct workqueue_struct *ipa_clnt_req_workqueue;
+static struct workqueue_struct *ipa_clnt_resp_workqueue;
+static void *curr_conn;
+static bool ipa3_qmi_modem_init_fin, ipa3_qmi_indication_fin;
+static struct work_struct ipa3_qmi_service_init_work;
+static uint32_t ipa_wan_platform;
+struct ipa3_qmi_context *ipa3_qmi_ctx;
+static bool workqueues_stopped;
+static bool ipa3_modem_init_cmplt;
+static bool first_time_handshake;
+/* QMI A5 service */
+
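+/*
+ * The apps processor (the "A5"/"A7" in this file) acts as a QMI server for
+ * the modem; the message descriptors below describe the requests it serves
+ * and the responses and indications it sends back.
+ */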
+static struct msg_desc ipa3_indication_reg_req_desc = {
+	.max_msg_len = QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01,
+	.ei_array = ipa3_indication_reg_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_indication_reg_resp_desc = {
+	.max_msg_len = QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INDICATION_REGISTER_RESP_V01,
+	.ei_array = ipa3_indication_reg_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_master_driver_complete_indication_desc = {
+	.max_msg_len = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01,
+	.ei_array = ipa3_master_driver_init_complt_ind_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_install_fltr_rule_req_desc = {
+	.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01,
+	.ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_install_fltr_rule_resp_desc = {
+	.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01,
+	.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_filter_installed_notif_req_desc = {
+	.max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01,
+	.ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_filter_installed_notif_resp_desc = {
+	.max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01,
+	.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_config_req_desc = {
+	.max_msg_len = QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_CONFIG_REQ_V01,
+	.ei_array = ipa3_config_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_config_resp_desc = {
+	.max_msg_len = QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_CONFIG_RESP_V01,
+	.ei_array = ipa3_config_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc ipa3_init_modem_driver_cmplt_req_desc = {
+	.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01,
+	.ei_array = ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei,
+};
+
+static struct msg_desc ipa3_init_modem_driver_cmplt_resp_desc = {
+	.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01,
+	.ei_array = ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei,
+};
+
+static int ipa3_handle_indication_req(void *req_h, void *req)
+{
+	struct ipa_indication_reg_req_msg_v01 *indication_req;
+	struct ipa_indication_reg_resp_msg_v01 resp;
+	struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+	int rc;
+
+	indication_req = (struct ipa_indication_reg_req_msg_v01 *)req;
+	IPAWANDBG("Received INDICATION Request\n");
+
+	memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+			&ipa3_indication_reg_resp_desc, &resp, sizeof(resp));
+	ipa3_qmi_indication_fin = true;
+	/* check if the init-complete indication needs to be sent to modem */
+	if (ipa3_qmi_modem_init_fin)	{
+		IPAWANDBG("send indication to modem (%d)\n",
+		ipa3_qmi_modem_init_fin);
+		memset(&ind, 0, sizeof(struct
+				ipa_master_driver_init_complt_ind_msg_v01));
+		ind.master_driver_init_status.result =
+			IPA_QMI_RESULT_SUCCESS_V01;
+		rc = qmi_send_ind_from_cb(ipa3_svc_handle, curr_conn,
+			&ipa3_master_driver_complete_indication_desc,
+			&ind,
+			sizeof(ind));
+	} else {
+		IPAWANERR("indication not sent\n");
+	}
+	return rc;
+}
+
+
+static int ipa3_handle_install_filter_rule_req(void *req_h, void *req)
+{
+	struct ipa_install_fltr_rule_req_msg_v01 *rule_req;
+	struct ipa_install_fltr_rule_resp_msg_v01 resp;
+	uint32_t rule_hdl[MAX_NUM_Q6_RULE];
+	int rc = 0, i;
+
+	rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)req;
+	memset(rule_hdl, 0, sizeof(rule_hdl));
+	memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+	IPAWANDBG("Received install filter Request\n");
+
+	rc = ipa3_copy_ul_filter_rule_to_ipa((struct
+		ipa_install_fltr_rule_req_msg_v01*)req);
+	if (rc)
+		IPAWANERR("failed to copy UL rules from modem\n");
+
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	if (rule_req->filter_spec_ex_list_valid == true) {
+		resp.rule_id_valid = 1;
+		if (rule_req->filter_spec_ex_list_len > MAX_NUM_Q6_RULE) {
+			resp.rule_id_len = MAX_NUM_Q6_RULE;
+			IPAWANERR("installed (%d) max Q6-UL rules ",
+			MAX_NUM_Q6_RULE);
+			IPAWANERR("but modem gives total (%u)\n",
+			rule_req->filter_spec_ex_list_len);
+		} else {
+			resp.rule_id_len =
+				rule_req->filter_spec_ex_list_len;
+		}
+	} else {
+		resp.rule_id_valid = 0;
+		resp.rule_id_len = 0;
+	}
+
+	/* construct UL filter rules response to Modem*/
+	for (i = 0; i < resp.rule_id_len; i++) {
+		resp.rule_id[i] =
+			rule_req->filter_spec_ex_list[i].rule_id;
+	}
+
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+			&ipa3_install_fltr_rule_resp_desc, &resp, sizeof(resp));
+
+	IPAWANDBG("Replied to install filter request\n");
+	return rc;
+}
+
+static int ipa3_handle_filter_installed_notify_req(void *req_h, void *req)
+{
+	struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+	int rc = 0;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	IPAWANDBG("Received filter_install_notify Request\n");
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+			&ipa3_filter_installed_notif_resp_desc,
+			&resp, sizeof(resp));
+
+	IPAWANDBG("Responded to filter_install_notify Request\n");
+	return rc;
+}
+
+static int handle_ipa_config_req(void *req_h, void *req)
+{
+	struct ipa_config_resp_msg_v01 resp;
+	int rc;
+
+	memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	IPAWANDBG("Received IPA CONFIG Request\n");
+	rc = ipa_mhi_handle_ipa_config_req(
+		(struct ipa_config_req_msg_v01 *)req);
+	if (rc) {
+		IPAERR("ipa_mhi_handle_ipa_config_req failed %d\n", rc);
+		resp.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+	}
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+		&ipa3_config_resp_desc,
+		&resp, sizeof(resp));
+	IPAWANDBG("Responded to IPA CONFIG Request\n");
+	return rc;
+}
+
+static int ipa3_handle_modem_init_cmplt_req(void *req_h, void *req)
+{
+	struct ipa_init_modem_driver_cmplt_req_msg_v01 *cmplt_req;
+	struct ipa_init_modem_driver_cmplt_resp_msg_v01 resp;
+	int rc;
+
+	IPAWANDBG("Received QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01\n");
+	cmplt_req = (struct ipa_init_modem_driver_cmplt_req_msg_v01 *)req;
+
+	if (ipa3_modem_init_cmplt == false) {
+		ipa3_modem_init_cmplt = true;
+		if (ipa3_qmi_modem_init_fin == true) {
+			IPAWANDBG("load uc related registers (%d)\n",
+			ipa3_qmi_modem_init_fin);
+			ipa3_uc_load_notify();
+		}
+	}
+
+	memset(&resp, 0, sizeof(resp));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+			&ipa3_init_modem_driver_cmplt_resp_desc,
+			&resp, sizeof(resp));
+
+	IPAWANDBG("Sent QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01\n");
+	return rc;
+}
+
+static int ipa3_a5_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	if (ipa3_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	if (curr_conn) {
+		IPAWANERR("Service is busy\n");
+		return -ECONNREFUSED;
+	}
+	curr_conn = conn_h;
+	return 0;
+}
+
+static int ipa3_a5_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	if (ipa3_svc_handle != handle || curr_conn != conn_h)
+		return -EINVAL;
+
+	curr_conn = NULL;
+	return 0;
+}
+
+static int ipa3_a5_svc_req_desc_cb(unsigned int msg_id,
+				struct msg_desc **req_desc)
+{
+	int rc;
+
+	switch (msg_id) {
+	case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+		*req_desc = &ipa3_indication_reg_req_desc;
+		rc = sizeof(struct ipa_indication_reg_req_msg_v01);
+		break;
+
+	case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+		*req_desc = &ipa3_install_fltr_rule_req_desc;
+		rc = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+		break;
+	case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+		*req_desc = &ipa3_filter_installed_notif_req_desc;
+		rc = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+		break;
+	case QMI_IPA_CONFIG_REQ_V01:
+		*req_desc = &ipa3_config_req_desc;
+		rc = sizeof(struct ipa_config_req_msg_v01);
+		break;
+	case QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01:
+		*req_desc = &ipa3_init_modem_driver_cmplt_req_desc;
+		rc = sizeof(struct ipa_init_modem_driver_cmplt_req_msg_v01);
+		break;
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static int ipa3_a5_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			void *req_h, unsigned int msg_id, void *req)
+{
+	int rc;
+
+	if (ipa3_svc_handle != handle || curr_conn != conn_h)
+		return -EINVAL;
+
+	switch (msg_id) {
+	case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+		rc = ipa3_handle_indication_req(req_h, req);
+		break;
+	case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+		rc = ipa3_handle_install_filter_rule_req(req_h, req);
+		rc = ipa3_wwan_update_mux_channel_prop();
+		break;
+	case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+		rc = ipa3_handle_filter_installed_notify_req(req_h, req);
+		break;
+	case QMI_IPA_CONFIG_REQ_V01:
+		rc = handle_ipa_config_req(req_h, req);
+		break;
+	case QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01:
+		rc = ipa3_handle_modem_init_cmplt_req(req_h, req);
+		break;
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static void ipa3_a5_svc_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	do {
+		IPAWANDBG_LOW("Notified about a Receive Event\n");
+		rc = qmi_recv_msg(ipa3_svc_handle);
+	} while (rc == 0);
+	if (rc != -ENOMSG)
+		IPAWANERR("Error receiving message\n");
+}
+
+static void qmi_ipa_a5_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		if (!workqueues_stopped)
+			queue_delayed_work(ipa_svc_workqueue,
+					   &work_recv_msg, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct qmi_svc_ops_options ipa3_a5_svc_ops_options = {
+	.version = 1,
+	.service_id = IPA_A5_SERVICE_SVC_ID,
+	.service_vers = IPA_A5_SVC_VERS,
+	.service_ins = IPA_A5_SERVICE_INS_ID,
+	.connect_cb = ipa3_a5_svc_connect_cb,
+	.disconnect_cb = ipa3_a5_svc_disconnect_cb,
+	.req_desc_cb = ipa3_a5_svc_req_desc_cb,
+	.req_cb = ipa3_a5_svc_req_cb,
+};
+
+
+/****************************************************/
+/*                 QMI A5 client ->Q6               */
+/****************************************************/
+static void ipa3_q6_clnt_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_recv_msg_client, ipa3_q6_clnt_recv_msg);
+static void ipa3_q6_clnt_svc_arrive(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_svc_arrive, ipa3_q6_clnt_svc_arrive);
+static void ipa3_q6_clnt_svc_exit(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_svc_exit, ipa3_q6_clnt_svc_exit);
+/* Test client port for IPC Router */
+static struct qmi_handle *ipa_q6_clnt;
+static int ipa_q6_clnt_reset;
+
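+/*
+ * Common result checker for the synchronous QMI calls below: it separates
+ * transport-level failures (timeouts, SSR resets) from QMI-level errors
+ * reported in the response payload.
+ */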
+static int ipa3_check_qmi_response(int rc,
+				  int req_id,
+				  enum ipa_qmi_result_type_v01 result,
+				  enum ipa_qmi_error_type_v01 error,
+				  char *resp_type)
+{
+	if (rc < 0) {
+		if (rc == -ETIMEDOUT && ipa3_rmnet_ctx.ipa_rmnet_ssr) {
+			IPAWANERR(
+			"Timeout for qmi request id %d\n", req_id);
+			return rc;
+		}
+		if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+			IPAWANERR(
+			"SSR while waiting for qmi request id %d\n", req_id);
+			return rc;
+		}
+		IPAWANERR("Error sending qmi request id %d, rc = %d\n",
+			req_id, rc);
+		return rc;
+	}
+	if (result != IPA_QMI_RESULT_SUCCESS_V01 &&
+	    ipa3_rmnet_ctx.ipa_rmnet_ssr) {
+		IPAWANERR(
+		"Got bad response: request id %d, result %d (error %d)\n",
+		req_id, result, error);
+		return result;
+	}
+	IPAWANDBG_LOW("Received %s successfully\n", resp_type);
+	return 0;
+}
+
+static int ipa3_qmi_init_modem_send_sync_msg(void)
+{
+	struct ipa_init_modem_driver_req_msg_v01 req;
+	struct ipa_init_modem_driver_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+	u16 smem_restr_bytes = ipa3_get_smem_restr_bytes();
+
+	memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01));
+	memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01));
+
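+	/*
+	 * Every partition offset passed to the modem below is shifted by
+	 * smem_restr_bytes, the SMEM restricted region reserved at the start
+	 * of IPA shared memory.
+	 */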
+	req.platform_type_valid = true;
+	req.platform_type = ipa_wan_platform;
+
+	req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0);
+	req.hdr_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes;
+	req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) +
+		smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1;
+
+	req.v4_route_tbl_info_valid = true;
+	req.v4_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v4_rt_nhash_ofst) + smem_restr_bytes;
+	req.v4_route_tbl_info.num_indices =
+		IPA_MEM_PART(v4_modem_rt_index_hi);
+	req.v6_route_tbl_info_valid = true;
+
+	req.v6_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v6_rt_nhash_ofst) + smem_restr_bytes;
+	req.v6_route_tbl_info.num_indices =
+		IPA_MEM_PART(v6_modem_rt_index_hi);
+
+	req.v4_filter_tbl_start_addr_valid = true;
+	req.v4_filter_tbl_start_addr =
+		IPA_MEM_PART(v4_flt_nhash_ofst) + smem_restr_bytes;
+
+	req.v6_filter_tbl_start_addr_valid = true;
+	req.v6_filter_tbl_start_addr =
+		IPA_MEM_PART(v6_flt_nhash_ofst) + smem_restr_bytes;
+
+	req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0);
+	req.modem_mem_info.block_start_addr =
+		IPA_MEM_PART(modem_ofst) + smem_restr_bytes;
+	req.modem_mem_info.size = IPA_MEM_PART(modem_size);
+
+	req.ctrl_comm_dest_end_pt_valid = true;
+	req.ctrl_comm_dest_end_pt =
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+
+	req.hdr_proc_ctx_tbl_info_valid =
+		(IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0);
+	req.hdr_proc_ctx_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes;
+	req.hdr_proc_ctx_tbl_info.modem_offset_end =
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) +
+		IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1;
+
+	req.zip_tbl_info_valid = (IPA_MEM_PART(modem_comp_decomp_size) != 0);
+	req.zip_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_comp_decomp_ofst) + smem_restr_bytes;
+	req.zip_tbl_info.modem_offset_end =
+		IPA_MEM_PART(modem_comp_decomp_ofst) +
+		IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1;
+
+	req.v4_hash_route_tbl_info_valid = true;
+	req.v4_hash_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v4_rt_hash_ofst) + smem_restr_bytes;
+	req.v4_hash_route_tbl_info.num_indices =
+		IPA_MEM_PART(v4_modem_rt_index_hi);
+
+	req.v6_hash_route_tbl_info_valid = true;
+	req.v6_hash_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v6_rt_hash_ofst) + smem_restr_bytes;
+	req.v6_hash_route_tbl_info.num_indices =
+		IPA_MEM_PART(v6_modem_rt_index_hi);
+
+	req.v4_hash_filter_tbl_start_addr_valid = true;
+	req.v4_hash_filter_tbl_start_addr =
+		IPA_MEM_PART(v4_flt_hash_ofst) + smem_restr_bytes;
+
+	req.v6_hash_filter_tbl_start_addr_valid = true;
+	req.v6_hash_filter_tbl_start_addr =
+		IPA_MEM_PART(v6_flt_hash_ofst) + smem_restr_bytes;
+
+	if (!ipa3_uc_loaded_check()) {  /* First time boot */
+		req.is_ssr_bootup_valid = false;
+		req.is_ssr_bootup = 0;
+	} else {  /* After SSR boot */
+		req.is_ssr_bootup_valid = true;
+		req.is_ssr_bootup = 1;
+	}
+
+	IPAWANDBG("platform_type %d\n", req.platform_type);
+	IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n",
+			req.hdr_tbl_info.modem_offset_start);
+	IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n",
+			req.hdr_tbl_info.modem_offset_end);
+	IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n",
+			req.v4_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v4_route_tbl_info.num_indices %d\n",
+			req.v4_route_tbl_info.num_indices);
+	IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n",
+			req.v6_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v6_route_tbl_info.num_indices %d\n",
+			req.v6_route_tbl_info.num_indices);
+	IPAWANDBG("v4_filter_tbl_start_addr %d\n",
+			req.v4_filter_tbl_start_addr);
+	IPAWANDBG("v6_filter_tbl_start_addr %d\n",
+			req.v6_filter_tbl_start_addr);
+	IPAWANDBG("modem_mem_info.block_start_addr %d\n",
+			req.modem_mem_info.block_start_addr);
+	IPAWANDBG("modem_mem_info.size %d\n",
+			req.modem_mem_info.size);
+	IPAWANDBG("ctrl_comm_dest_end_pt %d\n",
+			req.ctrl_comm_dest_end_pt);
+	IPAWANDBG("is_ssr_bootup %d\n",
+			req.is_ssr_bootup);
+	IPAWANDBG("v4_hash_route_tbl_info.route_tbl_start_addr %d\n",
+		req.v4_hash_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v4_hash_route_tbl_info.num_indices %d\n",
+		req.v4_hash_route_tbl_info.num_indices);
+	IPAWANDBG("v6_hash_route_tbl_info.route_tbl_start_addr %d\n",
+		req.v6_hash_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v6_hash_route_tbl_info.num_indices %d\n",
+		req.v6_hash_route_tbl_info.num_indices);
+	IPAWANDBG("v4_hash_filter_tbl_start_addr %d\n",
+		req.v4_hash_filter_tbl_start_addr);
+	IPAWANDBG("v6_hash_filter_tbl_start_addr %d\n",
+		req.v6_hash_filter_tbl_start_addr);
+
+	req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01;
+	req_desc.ei_array = ipa3_init_modem_driver_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01;
+	resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei;
+
+	pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_REQ_TIMEOUT_MS);
+	pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n");
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_init_modem_driver_resp_msg_v01");
+}
+
+/* sending filter-install-request to modem*/
+int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+	struct ipa_install_fltr_rule_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	/* check if the filter rules from IPACM are valid */
+	if (req->filter_spec_ex_list_len == 0) {
+		IPAWANDBG("IPACM pass zero rules to Q6\n");
+	} else {
+		IPAWANDBG("IPACM pass %u rules to Q6\n",
+		req->filter_spec_ex_list_len);
+	}
+
+	/* cache the qmi_filter_request */
+	memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+			req, sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
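+	/*
+	 * The cache above is a MAX_NUM_QMI_RULE_CACHE (10) entry ring; the
+	 * modulo wraps the write index so the oldest request is overwritten.
+	 */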
+
+	req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
+	req_desc.ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
+	resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei;
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_REQ_TIMEOUT_MS);
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_install_filter");
+}
+
+
+int ipa3_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	struct ipa_enable_force_clear_datapath_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+
+	if (!req || !req->source_pipe_bitmask) {
+		IPAWANERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	req_desc.max_msg_len =
+	QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+	req_desc.ei_array =
+		ipa3_enable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei;
+
+	rc = qmi_send_req_wait(ipa_q6_clnt,
+			&req_desc,
+			req,
+			sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), 0);
+	if (rc < 0) {
+		IPAWANERR("send req failed %d\n", rc);
+		return rc;
+	}
+	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("filter_notify failed %d\n",
+			resp.resp.result);
+		return resp.resp.result;
+	}
+	IPAWANDBG("SUCCESS\n");
+	return rc;
+}
+
+int ipa3_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	struct ipa_disable_force_clear_datapath_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+
+	if (!req) {
+		IPAWANERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	req_desc.max_msg_len =
+		QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+	req_desc.ei_array =
+		ipa3_disable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei;
+
+	rc = qmi_send_req_wait(ipa_q6_clnt,
+			&req_desc,
+			req,
+			sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), 0);
+	if (rc < 0) {
+		IPAWANERR("send req failed %d\n", rc);
+		return rc;
+	}
+	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("filter_notify failed %d\n",
+			resp.resp.result);
+		return resp.resp.result;
+	}
+	IPAWANDBG("SUCCESS\n");
+	return rc;
+}
+
+/* sending filter-installed-notify-request to modem*/
+int ipa3_qmi_filter_notify_send(
+		struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+	struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+	/* check if the filter rules from IPACM are valid */
+	if (req->rule_id_len == 0) {
+		IPAWANERR(" delete UL filter rule for pipe %d\n",
+		req->source_pipe_index);
+		return -EINVAL;
+	} else if (req->rule_id_len > QMI_IPA_MAX_FILTERS_V01) {
+		IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
+		req->source_pipe_index,
+		req->rule_id_len);
+		return -EINVAL;
+	}
+
+	/* cache the qmi_filter_request */
+	memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+		req, sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+
+	req_desc.max_msg_len =
+	QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01;
+	req_desc.ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
+	resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei;
+
+	rc = qmi_send_req_wait(ipa_q6_clnt,
+			&req_desc,
+			req,
+			sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_REQ_TIMEOUT_MS);
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_fltr_installed_notif_resp");
+}
+
+static void ipa3_q6_clnt_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	do {
+		IPAWANDBG_LOW("Notified about a Receive Event\n");
+		rc = qmi_recv_msg(ipa_q6_clnt);
+	} while (rc == 0);
+	if (rc != -ENOMSG)
+		IPAWANERR("Error receiving message\n");
+}
+
+static void ipa3_q6_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		IPAWANDBG_LOW("client qmi recv message called\n");
+		if (!workqueues_stopped)
+			queue_delayed_work(ipa_clnt_resp_workqueue,
+					   &ipa3_work_recv_msg_client, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
+			       void *msg, unsigned int msg_len,
+			       void *ind_cb_priv)
+{
+	struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+	struct msg_desc qmi_ind_desc;
+	int rc = 0;
+
+	if (handle != ipa_q6_clnt) {
+		IPAWANERR("Wrong client\n");
+		return;
+	}
+
+	if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) {
+		memset(&qmi_ind, 0, sizeof(
+			struct ipa_data_usage_quota_reached_ind_msg_v01));
+		qmi_ind_desc.max_msg_len =
+			QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01;
+		qmi_ind_desc.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01;
+		qmi_ind_desc.ei_array =
+			ipa3_data_usage_quota_reached_ind_msg_data_v01_ei;
+
+		rc = qmi_kernel_decode(&qmi_ind_desc, &qmi_ind, msg, msg_len);
+		if (rc < 0) {
+			IPAWANERR("Error decoding msg_id %d\n", msg_id);
+			return;
+		}
+		IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
+			  qmi_ind.apn.mux_id,
+			  (unsigned long int) qmi_ind.apn.num_Mbytes);
+		ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+	}
+}
+
+static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
+{
+	int rc;
+	struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+
+	/* Create a Local client port for QMI communication */
+	ipa_q6_clnt = qmi_handle_create(ipa3_q6_clnt_notify, NULL);
+	if (!ipa_q6_clnt) {
+		IPAWANERR("QMI client handle alloc failed\n");
+		return;
+	}
+
+	IPAWANDBG("Lookup server name, get client-hdl(%p)\n",
+		ipa_q6_clnt);
+	rc = qmi_connect_to_service(ipa_q6_clnt,
+			IPA_Q6_SERVICE_SVC_ID,
+			IPA_Q6_SVC_VERS,
+			IPA_Q6_SERVICE_INS_ID);
+	if (rc < 0) {
+		IPAWANERR("Server not found\n");
+		qmi_handle_destroy(ipa_q6_clnt);
+		ipa_q6_clnt = NULL;
+		return;
+	}
+
+	rc = qmi_register_ind_cb(ipa_q6_clnt, ipa3_q6_clnt_ind_cb, NULL);
+	if (rc < 0)
+		IPAWANERR("Unable to register for indications\n");
+
+	ipa_q6_clnt_reset = 0;
+	IPAWANDBG("Q6 QMI service available now\n");
+	/* Initialize modem IPA-driver */
+	IPAWANDBG("send ipa3_qmi_init_modem_send_sync_msg to modem\n");
+	rc = ipa3_qmi_init_modem_send_sync_msg();
+	if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+		IPAWANERR(
+			"ipa3_qmi_init_modem_send_sync_msg failed due to SSR!\n");
+		/* Cleanup will take place when ipa3_wwan_remove is called */
+		return;
+	}
+	if (rc != 0) {
+		IPAWANERR("ipa3_qmi_init_modem_send_sync_msg failed\n");
+		/*
+		 * This is a very unexpected scenario, which requires a kernel
+		 * panic in order to force dumps for QMI/Q6 side analysis.
+		 */
+		BUG();
+		return;
+	}
+	ipa3_qmi_modem_init_fin = true;
+
+	/* got modem_init_cmplt_req already, load uc-related registers */
+	if (ipa3_modem_init_cmplt == true) {
+		IPAWANDBG("load uc related registers (%d)\n",
+		ipa3_modem_init_cmplt);
+		ipa3_uc_load_notify();
+	}
+
+	/* In cold-bootup, first_time_handshake = false */
+	ipa3_q6_handshake_complete(first_time_handshake);
+	first_time_handshake = true;
+	IPAWANDBG("complete, ipa3_qmi_modem_init_fin : %d\n",
+		ipa3_qmi_modem_init_fin);
+
+	if (ipa3_qmi_indication_fin)	{
+		IPAWANDBG("send indication to modem (%d)\n",
+		ipa3_qmi_indication_fin);
+		memset(&ind, 0, sizeof(struct
+				ipa_master_driver_init_complt_ind_msg_v01));
+		ind.master_driver_init_status.result =
+			IPA_QMI_RESULT_SUCCESS_V01;
+		rc = qmi_send_ind(ipa3_svc_handle, curr_conn,
+			&ipa3_master_driver_complete_indication_desc,
+			&ind,
+			sizeof(ind));
+		IPAWANDBG("ipa_qmi_service_client good\n");
+	} else {
+		IPAWANERR("indication not sent (%d)\n",
+		ipa3_qmi_indication_fin);
+	}
+}
+
+
+static void ipa3_q6_clnt_svc_exit(struct work_struct *work)
+{
+	qmi_handle_destroy(ipa_q6_clnt);
+	ipa_q6_clnt_reset = 1;
+	ipa_q6_clnt = NULL;
+}
+
+
+static int ipa3_q6_clnt_svc_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	IPAWANDBG("event %ld\n", code);
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		if (!workqueues_stopped)
+			queue_delayed_work(ipa_clnt_req_workqueue,
+					   &ipa3_work_svc_arrive, 0);
+		break;
+	case QMI_SERVER_EXIT:
+		if (!workqueues_stopped)
+			queue_delayed_work(ipa_clnt_req_workqueue,
+					   &ipa3_work_svc_exit, 0);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+
+static struct notifier_block ipa3_q6_clnt_nb = {
+	.notifier_call = ipa3_q6_clnt_svc_event_notify,
+};
+
+static void ipa3_qmi_service_init_worker(struct work_struct *work)
+{
+	int rc;
+
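+	/*
+	 * Bring-up order: register the apps-side QMI service first so the
+	 * modem can reach it, then create the client workqueues and register
+	 * for Q6 service arrival/exit notifications.
+	 */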
+	/* Initialize QMI-service*/
+	IPAWANDBG("IPA A7 QMI init OK :>>>>\n");
+
+	/* start the QMI msg cache */
+	ipa3_qmi_ctx = vzalloc(sizeof(*ipa3_qmi_ctx));
+	if (!ipa3_qmi_ctx) {
+		IPAWANERR(":vzalloc err.\n");
+		return;
+	}
+	ipa3_qmi_ctx->modem_cfg_emb_pipe_flt =
+		ipa3_get_modem_cfg_emb_pipe_flt();
+
+	ipa_svc_workqueue = create_singlethread_workqueue("ipa_A7_svc");
+	if (!ipa_svc_workqueue) {
+		IPAWANERR("Creating ipa_A7_svc workqueue failed\n");
+		vfree(ipa3_qmi_ctx);
+		ipa3_qmi_ctx = NULL;
+		return;
+	}
+
+	ipa3_svc_handle = qmi_handle_create(qmi_ipa_a5_svc_ntfy, NULL);
+	if (!ipa3_svc_handle) {
+		IPAWANERR("Creating ipa_A7_svc qmi handle failed\n");
+		goto destroy_ipa_A7_svc_wq;
+	}
+
+	/*
+	 * Setting the current connection to NULL, as due to a race between
+	 * server and client clean-up in SSR, the disconnect_cb might not
+	 * have necessarily been called
+	 */
+	curr_conn = NULL;
+
+	rc = qmi_svc_register(ipa3_svc_handle, &ipa3_a5_svc_ops_options);
+	if (rc < 0) {
+		IPAWANERR("Registering ipa_a5 svc failed %d\n",
+				rc);
+		goto destroy_qmi_handle;
+	}
+
+	/* Initialize QMI-client */
+
+	ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req");
+	if (!ipa_clnt_req_workqueue) {
+		IPAWANERR("Creating clnt_req workqueue failed\n");
+		goto deregister_qmi_srv;
+	}
+
+	ipa_clnt_resp_workqueue = create_singlethread_workqueue("clnt_resp");
+	if (!ipa_clnt_resp_workqueue) {
+		IPAWANERR("Creating clnt_resp workqueue failed\n");
+		goto destroy_clnt_req_wq;
+	}
+
+	rc = qmi_svc_event_notifier_register(IPA_Q6_SERVICE_SVC_ID,
+				IPA_Q6_SVC_VERS,
+				IPA_Q6_SERVICE_INS_ID, &ipa3_q6_clnt_nb);
+	if (rc < 0) {
+		IPAWANERR("notifier register failed\n");
+		goto destroy_clnt_resp_wq;
+	}
+
+	/* wait for Q6 service, then send the modem init request to Q6 */
+	IPAWANDBG("wait service available\n");
+	return;
+
+destroy_clnt_resp_wq:
+	destroy_workqueue(ipa_clnt_resp_workqueue);
+	ipa_clnt_resp_workqueue = NULL;
+destroy_clnt_req_wq:
+	destroy_workqueue(ipa_clnt_req_workqueue);
+	ipa_clnt_req_workqueue = NULL;
+deregister_qmi_srv:
+	qmi_svc_unregister(ipa3_svc_handle);
+destroy_qmi_handle:
+	qmi_handle_destroy(ipa3_svc_handle);
+	ipa3_svc_handle = NULL;
+destroy_ipa_A7_svc_wq:
+	destroy_workqueue(ipa_svc_workqueue);
+	ipa_svc_workqueue = NULL;
+	vfree(ipa3_qmi_ctx);
+	ipa3_qmi_ctx = NULL;
+}
+
+int ipa3_qmi_service_init(uint32_t wan_platform_type)
+{
+	ipa_wan_platform = wan_platform_type;
+	ipa3_qmi_modem_init_fin = false;
+	ipa3_qmi_indication_fin = false;
+	ipa3_modem_init_cmplt = false;
+	workqueues_stopped = false;
+
+	if (!ipa3_svc_handle) {
+		INIT_WORK(&ipa3_qmi_service_init_work,
+			ipa3_qmi_service_init_worker);
+		schedule_work(&ipa3_qmi_service_init_work);
+	}
+	return 0;
+}
+
+void ipa3_qmi_service_exit(void)
+{
+	int ret = 0;
+
+	workqueues_stopped = true;
+
+	/* qmi-service */
+	if (ipa3_svc_handle) {
+		ret = qmi_svc_unregister(ipa3_svc_handle);
+		if (ret < 0)
+			IPAWANERR("unregister qmi handle %p failed, ret=%d\n",
+			ipa3_svc_handle, ret);
+	}
+	if (ipa_svc_workqueue) {
+		flush_workqueue(ipa_svc_workqueue);
+		destroy_workqueue(ipa_svc_workqueue);
+		ipa_svc_workqueue = NULL;
+	}
+
+	if (ipa3_svc_handle) {
+		ret = qmi_handle_destroy(ipa3_svc_handle);
+		if (ret < 0)
+			IPAWANERR("Error destroying qmi handle %p, ret=%d\n",
+			ipa3_svc_handle, ret);
+	}
+
+	/* qmi-client */
+
+	/* Unregister from events */
+	ret = qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID,
+				IPA_Q6_SVC_VERS,
+				IPA_Q6_SERVICE_INS_ID, &ipa3_q6_clnt_nb);
+	if (ret < 0)
+		IPAWANERR(
+		"Error qmi_svc_event_notifier_unregister service %d, ret=%d\n",
+		IPA_Q6_SERVICE_SVC_ID, ret);
+
+	/* Release client handle */
+	ipa3_q6_clnt_svc_exit(0);
+
+	if (ipa_clnt_req_workqueue) {
+		destroy_workqueue(ipa_clnt_req_workqueue);
+		ipa_clnt_req_workqueue = NULL;
+	}
+	if (ipa_clnt_resp_workqueue) {
+		destroy_workqueue(ipa_clnt_resp_workqueue);
+		ipa_clnt_resp_workqueue = NULL;
+	}
+
+	/* clean the QMI msg cache */
+	if (ipa3_qmi_ctx != NULL) {
+		vfree(ipa3_qmi_ctx);
+		ipa3_qmi_ctx = NULL;
+	}
+	ipa3_svc_handle = NULL;
+	ipa3_qmi_modem_init_fin = false;
+	ipa3_qmi_indication_fin = false;
+	ipa3_modem_init_cmplt = false;
+}
+
+void ipa3_qmi_stop_workqueues(void)
+{
+	IPAWANDBG("Stopping all QMI workqueues\n");
+
+	/* Stopping all workqueues so new work won't be scheduled */
+	workqueues_stopped = true;
+
+	/* Making sure that the current scheduled work won't be executed */
+	cancel_delayed_work(&work_recv_msg);
+	cancel_delayed_work(&ipa3_work_recv_msg_client);
+	cancel_delayed_work(&ipa3_work_svc_arrive);
+	cancel_delayed_work(&ipa3_work_svc_exit);
+}
+
+
+/* voting for bus BW to ipa_rm*/
+int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
+{
+	struct ipa_rm_perf_profile profile;
+	int ret;
+
+	if (bw_mbps == NULL) {
+		IPAWANERR("Bus BW is invalid\n");
+		return -EINVAL;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = *bw_mbps;
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+			&profile);
+	if (ret)
+		IPAWANERR("Failed to set perf profile to BW %u\n",
+			profile.max_supported_bandwidth_mbps);
+	else
+		IPAWANDBG("Succeeded to set perf profile to BW %u\n",
+			profile.max_supported_bandwidth_mbps);
+
+	return ret;
+}
+
+int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+			   struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01;
+	req_desc.ei_array = ipa3_get_data_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_DATA_STATS_RESP_V01;
+	resp_desc.ei_array = ipa3_get_data_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_get_data_stats_req_msg_v01),
+			&resp_desc, resp,
+			sizeof(struct ipa_get_data_stats_resp_msg_v01),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG_LOW("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa_get_data_stats_resp_msg_v01");
+}
+
+int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+			      struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01;
+	req_desc.ei_array = ipa3_get_apn_data_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01;
+	resp_desc.ei_array = ipa3_get_apn_data_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
+			&resp_desc, resp,
+			sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG_LOW("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa_get_apn_data_stats_req_msg_v01");
+}
+
+int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+	struct ipa_set_data_usage_quota_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01));
+
+	req_desc.max_msg_len = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01;
+	req_desc.ei_array = ipa3_set_data_usage_quota_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01;
+	resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG_LOW("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_set_data_usage_quota_req_msg_v01");
+}
+
+int ipa3_qmi_stop_data_qouta(void)
+{
+	struct ipa_stop_data_usage_quota_req_msg_v01 req;
+	struct ipa_stop_data_usage_quota_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01));
+	memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01));
+
+	req_desc.max_msg_len =
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01;
+	req_desc.ei_array = ipa3_stop_data_usage_quota_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01;
+	resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+		&resp_desc, &resp, sizeof(resp),
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG_LOW("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
new file mode 100644
index 0000000..0f64120
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -0,0 +1,303 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IPA_QMI_SERVICE_H
+#define IPA_QMI_SERVICE_H
+
+#include <linux/ipa.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "ipa_i.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+
+/**
+ * name of the DL wwan default routing tables for v4 and v6
+ */
+#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr"
+#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt"
+#define MAX_NUM_Q6_RULE 35
+#define MAX_NUM_QMI_RULE_CACHE 10
+#define DEV_NAME "ipa-wan"
+#define SUBSYS_MODEM "modem"
+
+#define IPAWANDBG(fmt, args...) \
+	do { \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPAWANDBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAWANERR(fmt, args...) \
+	do { \
+		pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAWANINFO(fmt, args...) \
+	do { \
+		pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+extern struct ipa3_qmi_context *ipa3_qmi_ctx;
+
+struct ipa3_qmi_context {
+	struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+	u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+	int num_ipa_install_fltr_rule_req_msg;
+	struct ipa_install_fltr_rule_req_msg_v01
+		ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	int num_ipa_fltr_installed_notif_req_msg;
+	struct ipa_fltr_installed_notif_req_msg_v01
+		ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	bool modem_cfg_emb_pipe_flt;
+};
+
+struct ipa3_rmnet_mux_val {
+	uint32_t  mux_id;
+	int8_t    vchannel_name[IFNAMSIZ];
+	bool mux_channel_set;
+	bool ul_flt_reg;
+	bool mux_hdr_set;
+	uint32_t  hdr_hdl;
+};
+
+extern struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
+extern struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_config_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_config_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
+
+/**
+ * struct ipa3_rmnet_context - IPA rmnet context
+ * @ipa_rmnet_ssr: support modem SSR
+ * @polling_interval: Requested interval for polling tethered statistics
+ * @metered_mux_id: The mux ID on which quota has been set
+ */
+struct ipa3_rmnet_context {
+	bool ipa_rmnet_ssr;
+	u64 polling_interval;
+	u32 metered_mux_id;
+};
+
+extern struct ipa3_rmnet_context ipa3_rmnet_ctx;
+
+#ifdef CONFIG_RMNET_IPA3
+
+int ipa3_qmi_service_init(uint32_t wan_platform_type);
+
+void ipa3_qmi_service_exit(void);
+
+/* sending filter-install-request to modem */
+int ipa3_qmi_filter_request_send(
+	struct ipa_install_fltr_rule_req_msg_v01 *req);
+
+/* sending filter-installed-notify-request to modem */
+int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
+		*req);
+
+/* voting for bus BW to ipa_rm*/
+int ipa3_vote_for_bus_bw(uint32_t *bw_mbps);
+
+int ipa3_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+int ipa3_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+	*rule_req);
+
+int ipa3_wwan_update_mux_channel_prop(void);
+
+int ipa3_wan_ioctl_init(void);
+
+void ipa3_wan_ioctl_stop_qmi_messages(void);
+
+void ipa3_wan_ioctl_enable_qmi_messages(void);
+
+void ipa3_wan_ioctl_deinit(void);
+
+void ipa3_qmi_stop_workqueues(void);
+
+int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats
+		*data);
+
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data);
+
+void ipa3_broadcast_quota_reach_ind(uint32_t mux_id);
+
+int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
+	*data);
+
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset);
+
+int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+	struct ipa_get_data_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req);
+
+int ipa3_qmi_stop_data_qouta(void);
+
+void ipa3_q6_handshake_complete(bool ssr_bootup);
+
+#else /* CONFIG_RMNET_IPA3 */
+
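+/*
+ * Stubs for builds without CONFIG_RMNET_IPA3: the QMI entry points return
+ * -EPERM and the void helpers become no-ops.
+ */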
+static inline int ipa3_qmi_service_init(uint32_t wan_platform_type)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_qmi_service_exit(void) { }
+
+/* sending filter-install-request to modem */
+static inline int ipa3_qmi_filter_request_send(
+	struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+/* sending filter-installed-notify-request to modem */
+static inline int ipa3_qmi_filter_notify_send(
+	struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_copy_ul_filter_rule_to_ipa(
+	struct ipa_install_fltr_rule_req_msg_v01 *rule_req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_wwan_update_mux_channel_prop(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_wan_ioctl_init(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_wan_ioctl_stop_qmi_messages(void) { }
+
+static inline void ipa3_wan_ioctl_enable_qmi_messages(void) { }
+
+static inline void ipa3_wan_ioctl_deinit(void) { }
+
+static inline void ipa3_qmi_stop_workqueues(void) { }
+
+static inline int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
+{
+	return -EPERM;
+}
+
+static inline int rmnet_ipa3_poll_tethering_stats(
+	struct wan_ioctl_poll_tethering_stats *data)
+{
+	return -EPERM;
+}
+
+static inline int rmnet_ipa3_set_data_quota(
+	struct wan_ioctl_set_data_quota *data)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id) { }
+
+static inline int ipa3_qmi_get_data_stats(
+	struct ipa_get_data_stats_req_msg_v01 *req,
+	struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_get_network_stats(
+	struct ipa_get_apn_data_stats_req_msg_v01 *req,
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_set_data_quota(
+	struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_stop_data_qouta(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
+
+#endif /* CONFIG_RMNET_IPA3 */
+
+#endif /* IPA_QMI_SERVICE_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
new file mode 100644
index 0000000..6907811
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -0,0 +1,2746 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+/*
+ * Type definitions: per-message elem_info arrays that tell the qmi_encdec
+ * core how to encode/decode each TLV of the IPA QMI v01 messages.
+ */
+static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_hdr_tbl_info_type_v01,
+					modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_hdr_tbl_info_type_v01,
+					modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_route_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_route_tbl_info_type_v01,
+					route_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_route_tbl_info_type_v01,
+					num_indices),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_modem_mem_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_modem_mem_info_type_v01,
+					block_start_addr),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_modem_mem_info_type_v01,
+					size),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+			modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+			modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_zip_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_zip_tbl_info_type_v01,
+					modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_zip_tbl_info_type_v01,
+					modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_range_eq_16_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			range_low),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			range_high),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_mask_eq_32_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+				struct ipa_ipfltr_mask_eq_32_type_v01,
+				offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+				struct ipa_ipfltr_mask_eq_32_type_v01,
+				mask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_32_type_v01,
+			value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_eq_16_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_eq_16_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_16_type_v01,
+					value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_eq_32_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_32_type_v01,
+					offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_32_type_v01,
+					value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_mask_eq_128_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 16,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			mask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 16,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
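+/*
+ * Layout of a single filter rule: the equation bitmap, then per-equation
+ * presence flags and values; the fixed-size (STATIC_ARRAY) equation arrays
+ * are sized by the QMI_IPA_IPFLTR_NUM_*_EQNS_V01 constants.
+ */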
+static struct elem_info ipa3_filter_rule_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			rule_eq_bitmap),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			tos_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tos_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					protocol_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					protocol_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_ihl_offset_range_16),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01,
+		.elem_size	= sizeof(
+			struct ipa_ipfltr_range_eq_16_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_range_16),
+		.ei_array	= ipa3_ipfltr_range_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_offset_meq_32),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					offset_meq_32),
+		.ei_array	= ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tc_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tc_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					flow_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					flow_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_16_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_eq_16_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_16),
+		.ei_array	= ipa3_ipfltr_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_32_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_eq_32_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_32),
+		.ei_array	= ipa3_ipfltr_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_ihl_offset_meq_32),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_meq_32),
+		.ei_array	= ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_offset_meq_128),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	=
+			QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01,
+		.elem_size	= sizeof(
+			struct ipa_ipfltr_mask_eq_128_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			offset_meq_128),
+		.ei_array	= ipa3_ipfltr_mask_eq_128_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					metadata_meq32_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					metadata_meq32),
+		.ei_array	= ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ipv4_frag_eq_present),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_filter_spec_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_spec_identifier),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_rule),
+		.ei_array	= ipa3_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_action),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					is_routing_table_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					route_table_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					is_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_filter_spec_ex_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					filter_rule),
+		.ei_array	= ipa3_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					filter_action),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					is_routing_table_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					route_table_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					is_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					rule_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					is_rule_hashable),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct
+elem_info ipa3_filter_rule_identifier_to_handle_map_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01,
+			filter_spec_identifier),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01,
+			filter_handle),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_filter_handle_to_index_map_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_handle_to_index_map_v01,
+			filter_handle),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_handle_to_index_map_v01,
+			filter_index),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
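+/*
+ * Message-level tables: each optional field is described by a QMI_OPT_FLAG
+ * "<field>_valid" element immediately followed by its payload element, both
+ * sharing the same tlv_type (0x10 and above).
+ */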
+struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			platform_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			platform_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_hdr_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_tbl_info),
+		.ei_array	= ipa3_hdr_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			modem_mem_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_modem_mem_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			modem_mem_info),
+		.ei_array	= ipa3_modem_mem_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			ctrl_comm_dest_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			ctrl_comm_dest_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			is_ssr_bootup_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			is_ssr_bootup),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_proc_ctx_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_proc_ctx_tbl_info),
+		.ei_array	= ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			zip_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_zip_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			zip_tbl_info),
+		.ei_array	= ipa3_zip_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			ctrl_comm_dest_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			ctrl_comm_dest_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			default_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			default_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			modem_driver_init_pending_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			modem_driver_init_pending),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_cmplt_req_msg_v01,
+			status),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_cmplt_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			master_driver_init_complete_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			master_driver_init_complete),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			data_usage_quota_reached_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			data_usage_quota_reached),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_indication_reg_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_master_driver_init_complt_ind_msg_v01,
+			master_driver_init_status),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct ipa_filter_spec_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list),
+		.ei_array	= ipa_filter_spec_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			source_pipe_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			source_pipe_index),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct ipa_filter_spec_ex_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex_list),
+		.ei_array	= ipa_filter_spec_ex_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list),
+		.ei_array	=
+			ipa3_filter_rule_identifier_to_handle_map_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			rule_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			rule_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			rule_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
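+/*
+ * Mandatory TLVs (no preceding QMI_OPT_FLAG) use tlv_type 0x01-0x03 here;
+ * optional fields start at 0x10 as usual.
+ */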
+struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			source_pipe_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			install_status),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x03,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			filter_index_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(
+			struct ipa_filter_handle_to_index_map_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x03,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			filter_index_list),
+		.ei_array	= ipa3_filter_handle_to_index_map_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_pipe_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_pipe_index),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			retain_header_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			retain_header),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_call_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_call_mux_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv4_filter_idx_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv4_filter_idx),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv6_filter_idx_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv6_filter_idx),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			source_pipe_bitmask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			request_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			throttle_source_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			throttle_source),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_disable_force_clear_datapath_req_msg_v01,
+			request_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_disable_force_clear_datapath_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_config_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_deaggr_supported_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_deaggr_supported),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			max_aggr_frame_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			max_aggr_frame_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ipa_ingress_pipe_mode_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ipa_ingress_pipe_mode),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_speed_info_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_speed_info),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_time_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_time_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_pkt_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_pkt_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_byte_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_byte_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_accumulation_time_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_accumulation_time_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_control_flags_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_control_flags),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_msi_event_threshold_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_msi_event_threshold),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_msi_event_threshold_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_msi_event_threshold),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_fifo_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_fifo_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_fifo_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_fifo_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_buf_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_buf_size),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_config_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_config_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			ipa_stats_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			reset_stats_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			reset_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
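+/*
+ * Element info for an aggregated (QMI_STRUCT) type. Members of an embedded
+ * struct carry no TLV type of their own, hence QMI_COMMON_TLV_TYPE below.
+ */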
+static struct elem_info ipa3_pipe_stats_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					pipe_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv4_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv4_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv6_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv6_bytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_stats_type_filter_rule_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_stats_type_filter_rule_v01,
+					filter_rule_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_stats_type_filter_rule_v01,
+					num_packets),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ipa_stats_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ipa_stats_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PIPES_V01,
+		.elem_size	= sizeof(struct ipa_pipe_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list),
+		.ei_array	= ipa3_pipe_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PIPES_V01,
+		.elem_size	= sizeof(struct ipa_pipe_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list),
+		.ei_array	= ipa3_pipe_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct
+					ipa_stats_type_filter_rule_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list),
+		.ei_array	= ipa3_stats_type_filter_rule_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_apn_data_stats_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_ul_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_ul_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_dl_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_dl_bytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(struct
+					ipa_apn_data_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list),
+		.ei_array	= ipa3_apn_data_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_data_usage_quota_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_data_usage_quota_info_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_data_usage_quota_info_type_v01,
+					num_Mbytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(struct
+					ipa_data_usage_quota_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list),
+		.ei_array	= ipa3_data_usage_quota_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct
+					ipa_data_usage_quota_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_data_usage_quota_reached_ind_msg_v01,
+			apn),
+		.ei_array	= ipa3_data_usage_quota_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[] = {
+	/* ipa_stop_data_usage_quota_req_msg is empty */
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_stop_data_usage_quota_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
new file mode 100644
index 0000000..8930d92
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -0,0 +1,1792 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/idr.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_RT_INDEX_BITMAP_SIZE	(32)
+#define IPA_RT_STATUS_OF_ADD_FAILED	(-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED	(-1)
+#define IPA_RT_STATUS_OF_MDFY_FAILED (-1)
+
+#define IPA_RT_GET_RULE_TYPE(__entry) \
+	( \
+	((__entry)->rule.hashable) ? \
+	(IPA_RULE_HASHABLE) : (IPA_RULE_NON_HASHABLE) \
+	)
+
+/**
+ * ipa_generate_rt_hw_rule() - Generate a single RT H/W rule
+ *  This func does the core driver preparation work and then calls
+ *  the HAL layer for the real work.
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer. buf == NULL means the caller only wants to know
+ *	the size of the rule as seen by HW, so no valid buffer was passed
+ *	and a scratch buffer is used instead.
+ *	With this scheme the rule is generated twice: once to learn the
+ *	size using the scratch buffer, and a second time to write the rule
+ *	into the caller-supplied buffer of the required size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ */
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+	struct ipa3_rt_entry *entry, u8 *buf)
+{
+	struct ipahal_rt_rule_gen_params gen_params;
+	int res = 0;
+
+	memset(&gen_params, 0, sizeof(gen_params));
+
+	gen_params.ipt = ip;
+	gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst);
+	if (gen_params.dst_pipe_idx == -1) {
+		IPAERR("Wrong destination pipe specified in RT rule\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+	if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
+		IPAERR("No RT rule on IPA_client_producer pipe.\n");
+		IPAERR("pipe_idx: %d dst_pipe: %d\n",
+				gen_params.dst_pipe_idx, entry->rule.dst);
+		WARN_ON(1);
+		return -EPERM;
+	}
+
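+	/*
+	 * Resolve the header info for this rule: a header processing context
+	 * takes precedence, then a raw header, otherwise no header insertion.
+	 */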
+	if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
+		struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+
+		proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
+		gen_params.hdr_lcl = ipa3_ctx->hdr_proc_ctx_tbl_lcl;
+		gen_params.hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+		gen_params.hdr_ofst = proc_ctx->offset_entry->offset +
+			ipa3_ctx->hdr_proc_ctx_tbl.start_offset;
+	} else if (entry->hdr) {
+		gen_params.hdr_lcl = ipa3_ctx->hdr_tbl_lcl;
+		gen_params.hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+		gen_params.hdr_ofst = entry->hdr->offset_entry->offset;
+	} else {
+		gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE;
+		gen_params.hdr_ofst = 0;
+	}
+
+	gen_params.priority = entry->prio;
+	gen_params.id = entry->rule_id;
+	gen_params.rule = (const struct ipa_rt_rule *)&entry->rule;
+
+	res = ipahal_rt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+	if (res)
+		IPAERR("failed to generate rt h/w rule\n");
+
+	return res;
+}
+
+/**
+ * ipa_translate_rt_tbl_to_hw_fmt() - translate the routing driver structures
+ *  (rules and tables) to HW format and fill them into the given buffers
+ * @ip: the ip address family type
+ * @rlt: the type of the rules to translate (hashable or non-hashable)
+ * @base: the rules body buffer to be filled
+ * @hdr: the rules header (addresses/offsets) buffer to be filled
+ * @body_ofst: the offset of the rules body from the rules header at
+ *  ipa sram (for local body usage)
+ * @apps_start_idx: the first rt table index of apps tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt, u8 *base, u8 *hdr,
+	u32 body_ofst, u32 apps_start_idx)
+{
+	struct ipa3_rt_tbl_set *set;
+	struct ipa3_rt_tbl *tbl;
+	struct ipa_mem_buffer tbl_mem;
+	u8 *tbl_mem_buf;
+	struct ipa3_rt_entry *entry;
+	int res;
+	u64 offset;
+	u8 *body_i;
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	body_i = base;
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (tbl->sz[rlt] == 0)
+			continue;
+		if (tbl->in_sys[rlt]) {
+			/* only body (no header) */
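+/*
+ * Note: TLV types 0x10 and above denote optional fields; each is encoded as
+ * a QMI_OPT_FLAG validity byte followed by the value itself, matching the
+ * *_valid/<field> pairs in the message structs.
+ */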
+			tbl_mem.size = tbl->sz[rlt] -
+				ipahal_get_hw_tbl_hdr_width();
+			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+				IPAERR("fail to alloc sys tbl of size %d\n",
+					tbl_mem.size);
+				goto err;
+			}
+
+			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+				hdr, tbl->idx - apps_start_idx, true)) {
+				IPAERR("fail to wrt sys tbl addr to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			tbl_mem_buf = tbl_mem.base;
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa_generate_rt_hw_rule(ip, entry,
+					tbl_mem_buf);
+				if (res) {
+					IPAERR("failed to gen HW RT rule\n");
+					goto hdr_update_fail;
+				}
+				tbl_mem_buf += entry->hw_len;
+			}
+
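+			/*
+			 * Keep the previous system table buffer until the HW
+			 * commit completes; it is freed later by
+			 * __ipa_reap_sys_rt_tbls().
+			 */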
+			if (tbl->curr_mem[rlt].phys_base) {
+				WARN_ON(tbl->prev_mem[rlt].phys_base);
+				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
+			}
+			tbl->curr_mem[rlt] = tbl_mem;
+		} else {
+			offset = body_i - base + body_ofst;
+
+			/* update the hdr at the right index */
+			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+				tbl->idx - apps_start_idx, true)) {
+				IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa_generate_rt_hw_rule(ip, entry,
+					body_i);
+				if (res) {
+					IPAERR("failed to gen HW RT rule\n");
+					goto err;
+				}
+				body_i += entry->hw_len;
+			}
+
+			/*
+			 * advance body_i to the next table alignment boundary
+			 * as local tables are laid out back-to-back
+			 */
+			body_i += ipahal_get_lcl_tbl_addr_alignment();
+			body_i = (u8 *)((long)body_i &
+				~ipahal_get_lcl_tbl_addr_alignment());
+		}
+	}
+
+	return 0;
+
+hdr_update_fail:
+	ipahal_free_dma_mem(&tbl_mem);
+err:
+	return -EPERM;
+}
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_tbl *next;
+	struct ipa3_rt_tbl_set *set;
+	int i;
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
+			if (tbl->prev_mem[i].phys_base) {
+				IPADBG_LOW(
+				"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
+				tbl->name, ip, i);
+				ipahal_free_dma_mem(&tbl->prev_mem[i]);
+				memset(&tbl->prev_mem[i], 0,
+					sizeof(tbl->prev_mem[i]));
+			}
+		}
+	}
+
+	set = &ipa3_ctx->reap_rt_tbl_set[ip];
+	list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+		for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
+			WARN_ON(tbl->prev_mem[i].phys_base != 0);
+			if (tbl->curr_mem[i].phys_base) {
+				IPADBG_LOW(
+				"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
+				tbl->name, ip, i);
+				ipahal_free_dma_mem(&tbl->curr_mem[i]);
+			}
+		}
+		list_del(&tbl->link);
+		kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl);
+	}
+}
+
+/**
+ * ipa_prep_rt_tbl_for_cmt() - prepare the rt table for commit:
+ *  assign priorities to the rules, calculate their sizes and the
+ *  overall table size
+ * @ip: the ip address family type
+ * @tbl: the rt tbl to be prepared
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
+	struct ipa3_rt_tbl *tbl)
+{
+	struct ipa3_rt_entry *entry;
+	int prio_i;
+	int res;
+	int max_prio;
+	u32 hdr_width;
+
+	tbl->sz[IPA_RULE_HASHABLE] = 0;
+	tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
+
+	max_prio = ipahal_get_rule_max_priority();
+
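+	/*
+	 * Assign rule priorities: rules flagged max_prio share the highest
+	 * priority, all others get strictly decreasing priorities in list
+	 * order.
+	 */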
+	prio_i = max_prio;
+	list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+
+		if (entry->rule.max_prio) {
+			entry->prio = max_prio;
+		} else {
+			if (ipahal_rule_decrease_priority(&prio_i)) {
+				IPAERR("cannot rule decrease priority - %d\n",
+					prio_i);
+				return -EPERM;
+			}
+			entry->prio = prio_i;
+		}
+
+		res = ipa_generate_rt_hw_rule(ip, entry, NULL);
+		if (res) {
+			IPAERR("failed to calculate HW RT rule size\n");
+			return -EPERM;
+		}
+
+		IPADBG("RT rule id (handle) %d hw_len %u priority %u\n",
+			entry->id, entry->hw_len, entry->prio);
+
+		if (entry->rule.hashable)
+			tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
+		else
+			tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
+	}
+
+	if ((tbl->sz[IPA_RULE_HASHABLE] +
+		tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
+		WARN_ON(1);
+		IPAERR("rt tbl %s is with zero total size\n", tbl->name);
+	}
+
+	hdr_width = ipahal_get_hw_tbl_hdr_width();
+
+	if (tbl->sz[IPA_RULE_HASHABLE])
+		tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
+	if (tbl->sz[IPA_RULE_NON_HASHABLE])
+		tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
+
+	IPADBG("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
+		tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
+
+	return 0;
+}
+
+/**
+ * ipa_generate_rt_hw_tbl_img() - generate the rt hw table images.
+ *  Headers and local bodies are created in DMA buffers that will later be
+ *  copied into the local memory (sram); sys table bodies stay in DDR
+ * @ip: the ip address family type
+ * @alloc_params: IN/OUT parameters holding info regarding the table headers
+ *  and bodies in DDR (DMA buffers), and the allocation info the HAL needs
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
+	struct ipahal_fltrt_alloc_imgs_params *alloc_params)
+{
+	u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
+	u32 apps_start_idx;
+	int rc = 0;
+
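+	/*
+	 * The body offsets passed to the translate routine are relative to
+	 * the start of the RT header region in SRAM, hence the subtraction
+	 * of the region base offsets below.
+	 */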
+	if (ip == IPA_IP_v4) {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_nhash_ofst) -
+			IPA_MEM_PART(v4_rt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_hash_ofst) -
+			IPA_MEM_PART(v4_rt_hash_ofst);
+		apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
+	} else {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_nhash_ofst) -
+			IPA_MEM_PART(v6_rt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_hash_ofst) -
+			IPA_MEM_PART(v6_rt_hash_ofst);
+		apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
+	}
+
+	if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+		IPAERR("fail to allocate RT HW TBL images. IP %d\n", ip);
+		rc = -ENOMEM;
+		goto allocate_fail;
+	}
+
+	if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
+		alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
+		hash_bdy_start_ofst, apps_start_idx)) {
+		IPAERR("fail to translate hashable rt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+	if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
+		alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
+		nhash_bdy_start_ofst, apps_start_idx)) {
+		IPAERR("fail to translate non-hashable rt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+
+	return rc;
+
+translate_fail:
+	if (alloc_params->hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params->hash_hdr);
+	ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+	if (alloc_params->hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->hash_bdy);
+	if (alloc_params->nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_fail:
+	return rc;
+}
+
+/**
+ * ipa_rt_valid_lcl_tbl_size() - validate that the space allocated for rt tbl
+ *  bodies in sram is enough for the commit
+ * @ipt: the ip address family type
+ * @rlt: the rule type (hashable or non-hashable)
+ * @bdy: the table body buffer to be placed in sram
+ *
+ * Return: true if enough space is available, false otherwise
+ */
+static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
+	enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
+{
+	u16 avail;
+
+	if (ipt == IPA_IP_v4)
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v4_rt_hash_size) :
+			IPA_MEM_PART(apps_v4_rt_nhash_size);
+	else
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v6_rt_hash_size) :
+			IPA_MEM_PART(apps_v6_rt_nhash_size);
+
+	if (bdy->size <= avail)
+		return true;
+
+	IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
+		bdy->size, avail, ipt, rlt);
+	return false;
+}
+
+/**
+ * __ipa_commit_rt_v3() - commit rt tables to the hw:
+ * commit the headers, and the bodies if they are local, with internal
+ * cache flushing
+ * @ip: the ip address family type
+ *
+ * Return: 0 on success, negative on failure
+ */
+int __ipa_commit_rt_v3(enum ipa_ip_type ip)
+{
+	struct ipa3_desc desc[5];
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	struct ipahal_imm_cmd_dma_shared_mem  mem_cmd = {0};
+	struct ipahal_imm_cmd_pyld *cmd_pyld[5];
+	int num_cmd = 0;
+	struct ipahal_fltrt_alloc_imgs_params alloc_params;
+	u32 num_modem_rt_index;
+	int rc = 0;
+	u32 lcl_hash_hdr, lcl_nhash_hdr;
+	u32 lcl_hash_bdy, lcl_nhash_bdy;
+	bool lcl_hash, lcl_nhash;
+	struct ipahal_reg_fltrt_hash_flush flush;
+	struct ipahal_reg_valmask valmask;
+	int i;
+	struct ipa3_rt_tbl_set *set;
+	struct ipa3_rt_tbl *tbl;
+	u32 tbl_hdr_width;
+
+	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+	memset(&alloc_params, 0, sizeof(alloc_params));
+	alloc_params.ipt = ip;
+
+	if (ip == IPA_IP_v4) {
+		num_modem_rt_index =
+			IPA_MEM_PART(v4_modem_rt_index_hi) -
+			IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_rt_hash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_rt_nhash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_rt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_rt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip4_rt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip4_rt_tbl_nhash_lcl;
+		alloc_params.tbls_num = IPA_MEM_PART(v4_apps_rt_index_hi) -
+			IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
+	} else {
+		num_modem_rt_index =
+			IPA_MEM_PART(v6_modem_rt_index_hi) -
+			IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_rt_hash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_rt_nhash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_rt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_rt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip6_rt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip6_rt_tbl_nhash_lcl;
+		alloc_params.tbls_num = IPA_MEM_PART(v6_apps_rt_index_hi) -
+			IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
+	}
+
+	if (!ipa3_ctx->rt_idx_bitmap[ip]) {
+		IPAERR("no rt tbls present\n");
+		rc = -EPERM;
+		goto no_rt_tbls;
+	}
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (ipa_prep_rt_tbl_for_cmt(ip, tbl)) {
+			rc = -EPERM;
+			goto no_rt_tbls;
+		}
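+		/*
+		 * Only local (SRAM resident) tables contribute to the body
+		 * image; tbl->sz includes the table header width added by
+		 * ipa_prep_rt_tbl_for_cmt(), which is not part of the body,
+		 * so subtract it from the running totals.
+		 */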
+		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+			tbl->sz[IPA_RULE_HASHABLE]) {
+			alloc_params.num_lcl_hash_tbls++;
+			alloc_params.total_sz_lcl_hash_tbls +=
+				tbl->sz[IPA_RULE_HASHABLE];
+			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+		}
+		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+			tbl->sz[IPA_RULE_NON_HASHABLE]) {
+			alloc_params.num_lcl_nhash_tbls++;
+			alloc_params.total_sz_lcl_nhash_tbls +=
+				tbl->sz[IPA_RULE_NON_HASHABLE];
+			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+		}
+	}
+
+	if (ipa_generate_rt_hw_tbl_img(ip, &alloc_params)) {
+		IPAERR("fail to generate RT HW TBL images. IP %d\n", ip);
+		rc = -EFAULT;
+		goto no_rt_tbls;
+	}
+
+	if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+		&alloc_params.hash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+	if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
+		&alloc_params.nhash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+
+	/* flushing ipa internal hashable rt rules cache */
+	memset(&flush, 0, sizeof(flush));
+	if (ip == IPA_IP_v4)
+		flush.v4_rt = true;
+	else
+		flush.v6_rt = true;
+	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("fail construct register_write imm cmd. IP %d\n", ip);
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+	desc[num_cmd].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
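+	/*
+	 * DMA the non-hashable and hashable header images (table addresses)
+	 * from the DDR staging buffers into their reserved areas in SRAM.
+	 */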
+	mem_cmd.is_read = false;
+	mem_cmd.skip_pipeline_clear = false;
+	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	mem_cmd.size = alloc_params.nhash_hdr.size;
+	mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base;
+	mem_cmd.local_addr = lcl_nhash_hdr;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+		rc = -EFAULT;
+		goto fail_imm_cmd_construct;
+	}
+	desc[num_cmd].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	mem_cmd.is_read = false;
+	mem_cmd.skip_pipeline_clear = false;
+	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	mem_cmd.size = alloc_params.hash_hdr.size;
+	mem_cmd.system_addr = alloc_params.hash_hdr.phys_base;
+	mem_cmd.local_addr = lcl_hash_hdr;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+		rc = -EFAULT;
+		goto fail_imm_cmd_construct;
+	}
+	desc[num_cmd].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	if (lcl_nhash) {
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.nhash_bdy.size;
+		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_nhash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd].type = IPA_IMM_CMD_DESC;
+		num_cmd++;
+	}
+	if (lcl_hash) {
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.hash_bdy.size;
+		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_hash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd].type = IPA_IMM_CMD_DESC;
+		num_cmd++;
+	}
+
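+	/* issue the flush + DMA immediate commands in a single send */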
+	if (ipa3_send_cmd(num_cmd, desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+		goto fail_imm_cmd_construct;
+	}
+
+	IPADBG("Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
+
+	IPADBG("Non-Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
+
+	if (alloc_params.hash_bdy.size) {
+		IPADBG("Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+			alloc_params.hash_bdy.phys_base,
+			alloc_params.hash_bdy.size);
+	}
+
+	if (alloc_params.nhash_bdy.size) {
+		IPADBG("Non-Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+			alloc_params.nhash_bdy.phys_base,
+			alloc_params.nhash_bdy.size);
+	}
+
+	__ipa_reap_sys_rt_tbls(ip);
+
+fail_imm_cmd_construct:
+	for (i = 0 ; i < num_cmd ; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+fail_size_valid:
+	if (alloc_params.hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params.hash_hdr);
+	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+	if (alloc_params.hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.hash_bdy);
+	if (alloc_params.nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+
+no_rt_tbls:
+	return rc;
+}
+
+/**
+ * __ipa3_find_rt_tbl() - find the routing table
+ *			whose name is given as parameter
+ * @ip:	[in] the ip address family type of the wanted routing table
+ * @name:	[in] the name of the wanted routing table
+ *
+ * Returns: the routing table whose name is given as parameter, or NULL if it
+ * doesn't exist
+ */
+struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+	struct ipa3_rt_tbl *entry;
+	struct ipa3_rt_tbl_set *set;
+
+	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Name too long: %s\n", name);
+		return NULL;
+	}
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+		if (!strcmp(name, entry->name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa3_query_rt_index() - find the index of the routing table
+ *			whose name and ip type are given as parameters
+ * @in:	[inout] the name and ip type of the wanted routing table; on
+ *	success its index is returned in the same structure
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+	struct ipa3_rt_tbl *entry;
+
+	if (in->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	/* check if this table exists */
+	entry = __ipa3_find_rt_tbl(in->ip, in->name);
+	if (!entry)
+		return -EFAULT;
+
+	in->idx  = entry->idx;
+	return 0;
+}
+
+static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+		const char *name)
+{
+	struct ipa3_rt_tbl *entry;
+	struct ipa3_rt_tbl_set *set;
+	int i;
+	int id;
+	int max_tbl_indx;
+
+	if (name == NULL) {
+		IPAERR("no tbl name\n");
+		goto error;
+	}
+
+	if (ip == IPA_IP_v4) {
+		max_tbl_indx =
+			max(IPA_MEM_PART(v4_modem_rt_index_hi),
+			IPA_MEM_PART(v4_apps_rt_index_hi));
+	} else if (ip == IPA_IP_v6) {
+		max_tbl_indx =
+			max(IPA_MEM_PART(v6_modem_rt_index_hi),
+			IPA_MEM_PART(v6_apps_rt_index_hi));
+	} else {
+		IPAERR("bad ip family type\n");
+		goto error;
+	}
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	/* check if this table exists */
+	entry = __ipa3_find_rt_tbl(ip, name);
+	if (!entry) {
+		entry = kmem_cache_zalloc(ipa3_ctx->rt_tbl_cache, GFP_KERNEL);
+		if (!entry) {
+			IPAERR("failed to alloc RT tbl object\n");
+			goto error;
+		}
+		/* find a routing tbl index */
+		for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+			if (!test_bit(i, &ipa3_ctx->rt_idx_bitmap[ip])) {
+				entry->idx = i;
+				set_bit(i, &ipa3_ctx->rt_idx_bitmap[ip]);
+				break;
+			}
+		}
+		if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+			IPAERR("not free RT tbl indices left\n");
+			goto fail_rt_idx_alloc;
+		}
+		if (i > max_tbl_indx) {
+			IPAERR("rt tbl index is above max\n");
+			goto fail_rt_idx_alloc;
+		}
+
+		INIT_LIST_HEAD(&entry->head_rt_rule_list);
+		INIT_LIST_HEAD(&entry->link);
+		strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+		entry->set = set;
+		entry->cookie = IPA_COOKIE;
+		entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ?
+			!ipa3_ctx->ip4_rt_tbl_hash_lcl :
+			!ipa3_ctx->ip6_rt_tbl_hash_lcl;
+		entry->in_sys[IPA_RULE_NON_HASHABLE] = (ip == IPA_IP_v4) ?
+			!ipa3_ctx->ip4_rt_tbl_nhash_lcl :
+			!ipa3_ctx->ip6_rt_tbl_nhash_lcl;
+		set->tbl_cnt++;
+		idr_init(&entry->rule_ids);
+		list_add(&entry->link, &set->head_rt_tbl_list);
+
+		IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+				set->tbl_cnt, ip);
+
+		id = ipa3_id_alloc(entry);
+		if (id < 0) {
+			IPAERR("failed to add to tree\n");
+			WARN_ON(1);
+		}
+		entry->id = id;
+	}
+
+	return entry;
+
+fail_rt_idx_alloc:
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
+error:
+	return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
+{
+	enum ipa_ip_type ip = IPA_IP_MAX;
+	u32 id;
+	struct ipa3_rt_tbl_set *rset;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parms\n");
+		return -EINVAL;
+	}
+	id = entry->id;
+	if (ipa3_id_find(id) == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	rset = &ipa3_ctx->reap_rt_tbl_set[ip];
+
+	idr_destroy(&entry->rule_ids);
+	if (entry->in_sys[IPA_RULE_HASHABLE] ||
+		entry->in_sys[IPA_RULE_NON_HASHABLE]) {
+		list_move(&entry->link, &rset->head_rt_tbl_list);
+		clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d ip=%d\n",
+			entry->idx, entry->set->tbl_cnt, ip);
+	} else {
+		list_del(&entry->link);
+		clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del rt tbl_idx=%d tbl_cnt=%d ip=%d\n",
+			entry->idx, entry->set->tbl_cnt, ip);
+		kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
+	}
+
+	/* remove the handle from the database */
+	ipa3_id_remove(id);
+	return 0;
+}
+
+static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
+				struct ipa3_hdr_entry **hdr,
+				struct ipa3_hdr_proc_ctx_entry **proc_ctx)
+{
+	if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
+		IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
+		return -EPERM;
+	}
+
+	if (rule->hdr_hdl) {
+		*hdr = ipa3_id_find(rule->hdr_hdl);
+		if ((*hdr == NULL) || ((*hdr)->cookie != IPA_COOKIE)) {
+			IPAERR("rt rule does not point to valid hdr\n");
+			return -EPERM;
+		}
+	} else if (rule->hdr_proc_ctx_hdl) {
+		*proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl);
+		if ((*proc_ctx == NULL) ||
+			((*proc_ctx)->cookie != IPA_COOKIE)) {
+
+			IPAERR("rt rule does not point to valid proc ctx\n");
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
+		const struct ipa_rt_rule *rule,
+		struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
+		struct ipa3_hdr_proc_ctx_entry *proc_ctx)
+{
+	int id;
+
+	*entry = kmem_cache_zalloc(ipa3_ctx->rt_rule_cache, GFP_KERNEL);
+	if (!*entry) {
+		IPAERR("failed to alloc RT rule object\n");
+		goto error;
+	}
+	INIT_LIST_HEAD(&(*entry)->link);
+	(*(entry))->cookie = IPA_COOKIE;
+	(*(entry))->rule = *rule;
+	(*(entry))->tbl = tbl;
+	(*(entry))->hdr = hdr;
+	(*(entry))->proc_ctx = proc_ctx;
+	id = ipa3_alloc_rule_id(&tbl->rule_ids);
+	if (id < 0) {
+		IPAERR("failed to allocate rule id\n");
+		WARN_ON(1);
+		goto alloc_rule_id_fail;
+	}
+	(*(entry))->rule_id = id;
+
+	return 0;
+
+alloc_rule_id_fail:
+	kmem_cache_free(ipa3_ctx->rt_rule_cache, *entry);
+error:
+	return -EPERM;
+}
+
+static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
+		struct ipa3_rt_tbl *tbl)
+{
+	int id;
+
+	tbl->rule_cnt++;
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+	else if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt++;
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+		goto ipa_insert_failed;
+	}
+	IPADBG("add rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
+		tbl->idx, tbl->rule_cnt, entry->rule_id);
+	*rule_hdl = id;
+	entry->id = id;
+
+	return 0;
+
+ipa_insert_failed:
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+	else if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt--;
+	idr_remove(&tbl->rule_ids, entry->rule_id);
+	list_del(&entry->link);
+	kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
+	return -EPERM;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_entry *entry;
+	struct ipa3_hdr_entry *hdr = NULL;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+	if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
+		goto error;
+
+
+	tbl = __ipa_add_rt_tbl(ip, name);
+	if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+		IPAERR("failed adding rt tbl name = %s\n",
+			name ? name : "");
+		goto error;
+	}
+	/*
+	 * do not allow any rules to be added at end of the "default" routing
+	 * tables
+	 */
+	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+	    (tbl->rule_cnt > 0) && (at_rear != 0)) {
+		IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+		       tbl->rule_cnt, at_rear);
+		goto error;
+	}
+
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+		goto error;
+
+	if (at_rear)
+		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+	else
+		list_add(&entry->link, &tbl->head_rt_rule_list);
+
+	if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl))
+		goto error;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl,
+		const struct ipa_rt_rule *rule, u32 *rule_hdl,
+		struct ipa3_rt_entry **add_after_entry)
+{
+	struct ipa3_rt_entry *entry;
+	struct ipa3_hdr_entry *hdr = NULL;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+	if (!*add_after_entry)
+		goto error;
+
+	if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
+		goto error;
+
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+		goto error;
+
+	list_add(&entry->link, &((*add_after_entry)->link));
+
+	if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl))
+		goto error;
+
+	/*
+	 * prepare for next insertion
+	 */
+	*add_after_entry = entry;
+
+	return 0;
+
+error:
+	*add_after_entry = NULL;
+	return -EPERM;
+}
+
+/**
+ * ipa3_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].rt_rule_hdl)) {
+			IPAERR("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_after() - Add the given routing rules after the
+ * specified rule to SW and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add + handle where to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
+{
+	int i;
+	int ret = 0;
+	struct ipa3_rt_tbl *tbl = NULL;
+	struct ipa3_rt_entry *entry = NULL;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
+	if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+		IPAERR("failed finding rt tbl name = %s\n",
+			rules->rt_tbl_name ? rules->rt_tbl_name : "");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (tbl->rule_cnt <= 0) {
+		IPAERR("tbl->rule_cnt <= 0");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (!entry) {
+		IPAERR("failed finding rule %d in rt tbls\n",
+			rules->add_after_hdl);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR("given rt rule does not match the table\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * do not allow any rules to be added at end of the "default" routing
+	 * tables
+	 */
+	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+			(&entry->link == tbl->head_rt_rule_list.prev)) {
+		IPAERR("cannot add rule at end of tbl rule_cnt=%d\n",
+			tbl->rule_cnt);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Add the rules one after the other. If one insertion fails it cuts
+	 * the chain: entry is set to NULL, so all following calls to
+	 * __ipa_add_rt_rule_after() fail and the remaining rules get a fail
+	 * status.
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		if (__ipa_add_rt_rule_after(tbl,
+					&rules->rules[i].rule,
+					&rules->rules[i].rt_rule_hdl,
+					&entry)) {
+			IPAERR("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			IPAERR("failed to commit\n");
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+int __ipa3_del_rt_rule(u32 rule_hdl)
+{
+	struct ipa3_rt_entry *entry;
+	int id;
+
+	entry = ipa3_id_find(rule_hdl);
+
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+
+	if (entry->hdr)
+		__ipa3_release_hdr(entry->hdr->id);
+	else if (entry->proc_ctx)
+		__ipa3_release_hdr_proc_ctx(entry->proc_ctx->id);
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
+		entry->tbl->idx, entry->tbl->rule_cnt, entry->rule_id);
+	idr_remove(&entry->tbl->rule_ids, entry->rule_id);
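+	/* delete the table once it has no rules and no references left */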
+	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry->tbl))
+			IPAERR("fail to del RT tbl\n");
+	}
+	entry->cookie = 0;
+	id = entry->id;
+	kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(id);
+
+	return 0;
+}
+
+/**
+ * ipa3_del_rt_rule() - Remove the specified routing rules from SW and
+ * optionally commit to IPA HW
+ * @hdls:	[inout] set of routing rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	int i;
+	int ret;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del rt rule %i\n", i);
+			hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_commit_rt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * issue a commit on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa3_commit_flt(ip))
+		return -EPERM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ctrl->ipa3_commit_rt(ip)) {
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_rt(enum ipa_ip_type ip)
+{
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_tbl *tbl_next;
+	struct ipa3_rt_tbl_set *set;
+	struct ipa3_rt_entry *rule;
+	struct ipa3_rt_entry *rule_next;
+	struct ipa3_rt_tbl_set *rset;
+	u32 apps_start_idx;
+	int id;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (ip == IPA_IP_v4)
+		apps_start_idx =
+			IPA_MEM_PART(v4_apps_rt_index_lo);
+	else
+		apps_start_idx =
+			IPA_MEM_PART(v6_apps_rt_index_lo);
+
+	/*
+	 * issue a reset on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa3_reset_flt(ip))
+		IPAERR("fail to reset flt ip=%d\n", ip);
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	rset = &ipa3_ctx->reap_rt_tbl_set[ip];
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("reset rt ip=%d\n", ip);
+	list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+		list_for_each_entry_safe(rule, rule_next,
+					 &tbl->head_rt_rule_list, link) {
+			if (ipa3_id_find(rule->id) == NULL) {
+				WARN_ON(1);
+				mutex_unlock(&ipa3_ctx->lock);
+				return -EFAULT;
+			}
+
+			/*
+			 * for the "default" routing tbl, remove all but the
+			 *  last rule
+			 */
+			if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
+				continue;
+
+			list_del(&rule->link);
+			tbl->rule_cnt--;
+			if (rule->hdr)
+				__ipa3_release_hdr(rule->hdr->id);
+			else if (rule->proc_ctx)
+				__ipa3_release_hdr_proc_ctx(rule->proc_ctx->id);
+			rule->cookie = 0;
+			idr_remove(&tbl->rule_ids, rule->rule_id);
+			id = rule->id;
+			kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
+
+			/* remove the handle from the database */
+			ipa3_id_remove(id);
+		}
+
+		if (ipa3_id_find(tbl->id) == NULL) {
+			WARN_ON(1);
+			mutex_unlock(&ipa3_ctx->lock);
+			return -EFAULT;
+		}
+		id = tbl->id;
+
+		/* do not remove the "default" routing tbl which has index 0 */
+		if (tbl->idx != apps_start_idx) {
+			idr_destroy(&tbl->rule_ids);
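+			/*
+			 * tables that still own system (DDR) memory are moved
+			 * to the reap list rather than freed right away
+			 */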
+			if (tbl->in_sys[IPA_RULE_HASHABLE] ||
+				tbl->in_sys[IPA_RULE_NON_HASHABLE]) {
+				list_move(&tbl->link, &rset->head_rt_tbl_list);
+				clear_bit(tbl->idx,
+					  &ipa3_ctx->rt_idx_bitmap[ip]);
+				set->tbl_cnt--;
+				IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+			} else {
+				list_del(&tbl->link);
+				set->tbl_cnt--;
+				clear_bit(tbl->idx,
+					  &ipa3_ctx->rt_idx_bitmap[ip]);
+				IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+				kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl);
+			}
+			/* remove the handle from the database */
+			ipa3_id_remove(id);
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+/**
+ * ipa3_get_rt_tbl() - lookup the specified routing table and return its
+ * handle if it exists; if the lookup succeeds the routing table ref cnt is
+ * increased
+ * @lookup:	[inout] routing table to lookup and its handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *	Caller should call ipa3_put_rt_tbl later if this function succeeds
+ */
+int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	struct ipa3_rt_tbl *entry;
+	int result = -EFAULT;
+
+	if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
+	if (entry && entry->cookie == IPA_COOKIE) {
+		entry->ref_cnt++;
+		lookup->hdl = entry->id;
+
+		/* commit for get */
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip))
+			IPAERR("fail to commit RT tbl\n");
+
+		result = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl:	[in] the routing table handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	struct ipa3_rt_tbl *entry;
+	enum ipa_ip_type ip = IPA_IP_MAX;
+	int result;
+
+	mutex_lock(&ipa3_ctx->lock);
+	entry = ipa3_id_find(rt_tbl_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		result = -EINVAL;
+		goto ret;
+	}
+
+	if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
+		IPAERR("bad parms\n");
+		result = -EINVAL;
+		goto ret;
+	}
+
+	if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	entry->ref_cnt--;
+	if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry))
+			IPAERR("fail to del RT tbl\n");
+		/* commit for put */
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(ip))
+			IPAERR("fail to commit RT tbl\n");
+	}
+
+	result = 0;
+
+ret:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+
+static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
+{
+	struct ipa3_rt_entry *entry;
+	struct ipa3_hdr_entry *hdr = NULL;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+	if (rtrule->rule.hdr_hdl) {
+		hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
+		if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+			IPAERR("rt rule does not point to valid hdr\n");
+			goto error;
+		}
+	} else if (rtrule->rule.hdr_proc_ctx_hdl) {
+		proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl);
+		if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
+			IPAERR("rt rule does not point to valid proc ctx\n");
+			goto error;
+		}
+	}
+
+	entry = ipa3_id_find(rtrule->rt_rule_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		goto error;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		goto error;
+	}
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+	if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt--;
+
+	entry->rule = rtrule->rule;
+	entry->hdr = hdr;
+	entry->proc_ctx = proc_ctx;
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+	if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt++;
+
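+	/* force hw_len and prio to be recomputed on the next commit */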
+	entry->hw_len = 0;
+	entry->prio = 0;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+/**
+ * ipa3_mdfy_rt_rule() - Modify the specified routing rules in SW and
+ * optionally commit to IPA HW
+ * @hdls:	[inout] set of routing rules to modify
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
+			IPAERR("failed to mdfy rt rule %i\n", i);
+			hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
+		} else {
+			hdls->rules[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_set_rt_tuple_mask() - Sets the rt tuple masking for the given tbl
+ *  table index must be for an AP EP (not modem)
+ *  updates the routing masking values without changing the flt ones.
+ *
+ * @tbl_idx: routing table index to configure the tuple masking
+ * @tuple: the tuple members masking
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple)
+{
+	struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
+
+	if (!tuple) {
+		IPAERR("bad tuple\n");
+		return -EINVAL;
+	}
+
+	if (tbl_idx >=
+		max(IPA_MEM_PART(v6_rt_num_index),
+		IPA_MEM_PART(v4_rt_num_index)) ||
+		tbl_idx < 0) {
+		IPAERR("bad table index\n");
+		return -EINVAL;
+	}
+
+	if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
+		tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi)) {
+		IPAERR("cannot configure modem v4 rt tuple by AP\n");
+		return -EINVAL;
+	}
+
+	if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
+		tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi)) {
+		IPAERR("cannot configure modem v6 rt tuple by AP\n");
+		return -EINVAL;
+	}
+
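+	/*
+	 * read-modify-write: update only the routing half of the tuple
+	 * config, leaving the filtering half of the register untouched
+	 */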
+	ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		tbl_idx, &fltrt_tuple);
+	fltrt_tuple.rt = *tuple;
+	ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		tbl_idx, &fltrt_tuple);
+
+	return 0;
+}
+
+/**
+ * ipa3_rt_read_tbl_from_hw() - Read routing table from IPA HW
+ * @tbl_idx: routing table index
+ * @ip_type: IPv4 or IPv6 table
+ * @hashable: hashable or non-hashable table
+ * @entry: array to fill the table entries
+ * @num_entry: number of entries in the entry array. Set by the caller to
+ *  indicate the entry array size, then updated by this function to the
+ *  number of entries actually filled in
+ *
+ * This function reads the routing table from IPA SRAM and prepares an array
+ * of entries. This function is mainly used for debugging purposes.
+ *
+ * For an empty table, or a modem table residing in system memory, zero
+ * entries will be returned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
+	bool hashable, struct ipahal_rt_rule_entry entry[], int *num_entry)
+{
+	void *ipa_sram_mmio;
+	u64 hdr_base_ofst;
+	int res = 0;
+	u64 tbl_addr;
+	bool is_sys;
+	struct ipa_mem_buffer *sys_tbl_mem;
+	u8 *rule_addr;
+	int rule_idx;
+
+	IPADBG("tbl_idx=%d ip_type=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+		tbl_idx, ip_type, hashable, entry, num_entry);
+
+	if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) {
+		IPAERR("Invalid params\n");
+		return -EFAULT;
+	}
+
+	if (ip_type == IPA_IP_v6 && tbl_idx >= IPA_MEM_PART(v6_rt_num_index)) {
+		IPAERR("Invalid params\n");
+		return -EFAULT;
+	}
+
+	/* map IPA SRAM */
+	ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+			ipa3_ctx->smem_restricted_bytes / 4),
+		ipa3_ctx->smem_sz);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	memset(entry, 0, sizeof(*entry) * (*num_entry));
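+	/*
+	 * select the SRAM offset of the routing table header (address array)
+	 * according to IP family and hashable/non-hashable type
+	 */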
+	if (hashable) {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_rt_hash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_rt_hash_ofst);
+	} else {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_rt_nhash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_rt_nhash_ofst);
+	}
+
+	IPADBG("hdr_base_ofst=0x%llx\n", hdr_base_ofst);
+
+	res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+		tbl_idx, &tbl_addr, &is_sys);
+	if (res) {
+		IPAERR("failed to read table address from header structure\n");
+		goto bail;
+	}
+	IPADBG("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n",
+		tbl_idx, tbl_addr, is_sys);
+	if (!tbl_addr) {
+		IPAERR("invalid rt tbl addr\n");
+		res = -EFAULT;
+		goto bail;
+	}
+
+	/* for tables which reside in DDR access it from the virtual memory */
+	if (is_sys) {
+		struct ipa3_rt_tbl_set *set;
+		struct ipa3_rt_tbl *tbl;
+
+		set = &ipa3_ctx->rt_tbl_set[ip_type];
+		rule_addr = NULL;
+		list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+			if (tbl->idx == tbl_idx) {
+				sys_tbl_mem = &(tbl->curr_mem[hashable ?
+					IPA_RULE_HASHABLE :
+					IPA_RULE_NON_HASHABLE]);
+				if (sys_tbl_mem->phys_base &&
+					sys_tbl_mem->phys_base != tbl_addr) {
+					IPAERR("mismatch:parsed=%llx sw=%pad\n"
+						, tbl_addr,
+						&sys_tbl_mem->phys_base);
+				}
+				if (sys_tbl_mem->phys_base)
+					rule_addr = sys_tbl_mem->base;
+				else
+					rule_addr = NULL;
+			}
+		}
+	} else {
+		rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
+	}
+
+	IPADBG("First rule addr 0x%p\n", rule_addr);
+
+	if (!rule_addr) {
+		/* Modem table in system memory or empty table */
+		*num_entry = 0;
+		goto bail;
+	}
+
+	rule_idx = 0;
+	while (rule_idx < *num_entry) {
+		res = ipahal_rt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+		if (res) {
+			IPAERR("failed parsing rt rule\n");
+			goto bail;
+		}
+
+		IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+		if (!entry[rule_idx].rule_size)
+			break;
+
+		rule_addr += entry[rule_idx].rule_size;
+		rule_idx++;
+	}
+	*num_entry = rule_idx;
+bail:
+	iounmap(ipa_sram_mmio);
+	return res;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
new file mode 100644
index 0000000..b67899b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ipa
+#define TRACE_INCLUDE_FILE ipa_trace
+
+#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+	intr_to_poll3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	poll_to_intr3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	idle_sleep_enter3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	idle_sleep_exit3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netifni3,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netifrx3,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netif_rcv_skb3,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+#endif /* _IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
new file mode 100644
index 0000000..780a005
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -0,0 +1,991 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/delay.h>
+
+#define IPA_RAM_UC_SMEM_SIZE 128
+#define IPA_HW_INTERFACE_VERSION     0x2000
+#define IPA_PKT_FLUSH_TO_US 100
+#define IPA_UC_POLL_SLEEP_USEC 100
+#define IPA_UC_POLL_MAX_RETRY 10000
+
+/**
+ * Mailbox register used to interrupt the HWP for CPU commands.
+ * The IPA_UC_MAILBOX_m_n doorbell is used instead of IPA_IRQ_EE_UC_0
+ * due to a HW limitation.
+ *
+ */
+#define IPA_CPU_2_HW_CMD_MBOX_m          0
+#define IPA_CPU_2_HW_CMD_MBOX_n         23
+
+/**
+ * enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU
+ * IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior
+ *                                 of HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information.
+ * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal
+ *                              handling.
+ * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to goto Clock Gated state.
+ * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to goto Clock Ungated state.
+ * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB.
+ * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
+ * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness.
+ */
+enum ipa3_cpu_2_hw_commands {
+	IPA_CPU_2_HW_CMD_NO_OP                     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_CPU_2_HW_CMD_UPDATE_FLAGS              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_CPU_2_HW_CMD_DEBUG_GET_INFO            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+	IPA_CPU_2_HW_CMD_ERR_FATAL                 =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+	IPA_CPU_2_HW_CMD_CLK_GATE                  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+	IPA_CPU_2_HW_CMD_CLK_UNGATE                =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+	IPA_CPU_2_HW_CMD_MEMCPY                    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+	IPA_CPU_2_HW_CMD_RESET_PIPE                =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
+	IPA_CPU_2_HW_CMD_REG_WRITE                 =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
+	IPA_CPU_2_HW_CMD_GSI_CH_EMPTY              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10),
+};
+
+/**
+ * enum ipa3_hw_2_cpu_responses -  Values that represent common HW responses
+ *  to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_NO_OP : No operation response
+ * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once
+ *  boot sequence is completed and HW is ready to serve commands from CPU
+ * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands
+ * @IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO : Response to
+ *  IPA_CPU_2_HW_CMD_DEBUG_GET_INFO command
+ */
+enum ipa3_hw_2_cpu_responses {
+	IPA_HW_2_CPU_RESPONSE_NO_OP          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+};
+
+/**
+ * struct IpaHwMemCopyData_t - Structure holding the parameters
+ * for the IPA_CPU_2_HW_CMD_MEMCPY command.
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+struct IpaHwMemCopyData_t  {
+	u32 destination_addr;
+	u32 source_addr;
+	u32 dest_buffer_size;
+	u32 source_buffer_size;
+};
+
+/**
+ * union IpaHwResetPipeCmdData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_RESET_PIPE command.
+ * @pipeNum : Pipe number to be reset
+ * @direction : 1 - IPA Producer, 0 - IPA Consumer
+ * @reserved_02_03 : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwResetPipeCmdData_t {
+	struct IpaHwResetPipeCmdParams_t {
+		u8     pipeNum;
+		u8     direction;
+		u32    reserved_02_03;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * struct IpaHwRegWriteCmdData_t - holds the parameters for
+ * IPA_CPU_2_HW_CMD_REG_WRITE command. Parameters are
+ * sent as 64b immediate parameters.
+ * @RegisterAddress: RG10 register address where the value needs to be written
+ * @RegisterValue: 32-Bit value to be written into the register
+ */
+struct IpaHwRegWriteCmdData_t {
+	u32 RegisterAddress;
+	u32 RegisterValue;
+};
+
+/**
+ * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters
+ * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response.
+ * @originalCmdOp : The original command opcode
+ * @status : 0 for success indication, otherwise failure
+ * @reserved : Reserved
+ *
+ * Parameters are sent as 32b immediate parameters.
+ */
+union IpaHwCpuCmdCompletedResponseData_t {
+	struct IpaHwCpuCmdCompletedResponseParams_t {
+		u32 originalCmdOp:8;
+		u32 status:8;
+		u32 reserved:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command
+ * @newFlags: SW flags defined the behavior of HW.
+ *	This field is expected to be used as bitmask for enum ipa3_hw_flags
+ */
+union IpaHwUpdateFlagsCmdData_t {
+	struct IpaHwUpdateFlagsCmdParams_t {
+		u32 newFlags;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * union IpaHwChkChEmptyCmdData_t -  Structure holding the parameters for
+ *  IPA_CPU_2_HW_CMD_GSI_CH_EMPTY command. Parameters are sent as 32b
+ *  immediate parameters.
+ * @ee_n : EE owner of the channel
+ * @vir_ch_id : GSI virtual channel ID of the channel to be checked for emptiness
+ * @reserved_02_04 : Reserved
+ */
+union IpaHwChkChEmptyCmdData_t {
+	struct IpaHwChkChEmptyCmdParams_t {
+		u8 ee_n;
+		u8 vir_ch_id;
+		u16 reserved_02_04;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * When the resource group 10 limitation mitigation is enabled, the uC
+ * send-command path must be able to run in interrupt context, so a
+ * spin lock is used instead of a mutex.
+ */
+#define IPA3_UC_LOCK(flags)						 \
+do {									 \
+	if (ipa3_ctx->apply_rg10_wa)					 \
+		spin_lock_irqsave(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \
+	else								 \
+		mutex_lock(&ipa3_ctx->uc_ctx.uc_lock);			 \
+} while (0)
+
+#define IPA3_UC_UNLOCK(flags)						      \
+do {									      \
+	if (ipa3_ctx->apply_rg10_wa)					      \
+		spin_unlock_irqrestore(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \
+	else								      \
+		mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);		      \
+} while (0)
+
+struct ipa3_uc_hdlrs ipa3_uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } };
+
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
+{
+	const char *str;
+
+	switch (err_type) {
+	case IPA_HW_ERROR_NONE:
+		str = "IPA_HW_ERROR_NONE";
+		break;
+	case IPA_HW_INVALID_DOORBELL_ERROR:
+		str = "IPA_HW_INVALID_DOORBELL_ERROR";
+		break;
+	case IPA_HW_DMA_ERROR:
+		str = "IPA_HW_DMA_ERROR";
+		break;
+	case IPA_HW_FATAL_SYSTEM_ERROR:
+		str = "IPA_HW_FATAL_SYSTEM_ERROR";
+		break;
+	case IPA_HW_INVALID_OPCODE:
+		str = "IPA_HW_INVALID_OPCODE";
+		break;
+	case IPA_HW_INVALID_PARAMS:
+		str = "IPA_HW_INVALID_PARAMS";
+		break;
+	case IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE:
+		str = "IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE";
+		break;
+	case IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE:
+		str = "IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE";
+		break;
+	case IPA_HW_GSI_CH_NOT_EMPTY_FAILURE:
+		str = "IPA_HW_GSI_CH_NOT_EMPTY_FAILURE";
+		break;
+	default:
+		str = "INVALID ipa_hw_errors type";
+	}
+
+	return str;
+}
+
+static void ipa3_log_evt_hdlr(void)
+{
+	int i;
+
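+	/*
+	 * on the first LOG_INFO event, validate the reported offset, map the
+	 * uC event-log area and notify the per-feature handlers; afterwards
+	 * only verify that the offset did not change
+	 */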
+	if (!ipa3_ctx->uc_ctx.uc_event_top_ofst) {
+		ipa3_ctx->uc_ctx.uc_event_top_ofst =
+			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
+		if (ipa3_ctx->uc_ctx.uc_event_top_ofst +
+			sizeof(struct IpaHwEventLogInfoData_t) >=
+			ipa3_ctx->ctrl->ipa_reg_base_ofst +
+			ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+			ipa3_ctx->smem_sz) {
+			IPAERR("uc_top 0x%x outside SRAM\n",
+				ipa3_ctx->uc_ctx.uc_event_top_ofst);
+			goto bad_uc_top_ofst;
+		}
+
+		ipa3_ctx->uc_ctx.uc_event_top_mmio = ioremap(
+			ipa3_ctx->ipa_wrapper_base +
+			ipa3_ctx->uc_ctx.uc_event_top_ofst,
+			sizeof(struct IpaHwEventLogInfoData_t));
+		if (!ipa3_ctx->uc_ctx.uc_event_top_mmio) {
+			IPAERR("fail to ioremap uc top\n");
+			goto bad_uc_top_ofst;
+		}
+
+		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+			if (ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr)
+				ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr
+					(ipa3_ctx->uc_ctx.uc_event_top_mmio);
+		}
+	} else {
+
+		if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams !=
+			ipa3_ctx->uc_ctx.uc_event_top_ofst) {
+			IPAERR("uc top ofst changed new=%u cur=%u\n",
+				ipa3_ctx->uc_ctx.uc_sram_mmio->
+				eventParams,
+				ipa3_ctx->uc_ctx.uc_event_top_ofst);
+		}
+	}
+
+	return;
+
+bad_uc_top_ofst:
+	ipa3_ctx->uc_ctx.uc_event_top_ofst = 0;
+}
+
+/**
+ * ipa3_uc_state_check() - Check the status of the uC interface
+ *
+ * Return value: 0 if the uC is loaded, interface is initialized
+ *               and there was no recent failure in one of the commands.
+ *               A negative value is returned otherwise.
+ */
+int ipa3_uc_state_check(void)
+{
+	if (!ipa3_ctx->uc_ctx.uc_inited) {
+		IPAERR("uC interface not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!ipa3_ctx->uc_ctx.uc_loaded) {
+		IPAERR("uC is not loaded\n");
+		return -EFAULT;
+	}
+
+	if (ipa3_ctx->uc_ctx.uc_failed) {
+		IPAERR("uC has failed its last command\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_uc_loaded_check() - Check the uC has been loaded
+ *
+ * Return value: 1 if the uC is loaded, 0 otherwise
+ */
+int ipa3_uc_loaded_check(void)
+{
+	return ipa3_ctx->uc_ctx.uc_loaded;
+}
+EXPORT_SYMBOL(ipa3_uc_loaded_check);
+
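+/**
+ * ipa3_uc_event_handler() - handle events raised by the uC (IPA_UC_IRQ_0)
+ *
+ * Dispatches the feature specific event handler (if registered) and performs
+ * the common handling for uC error and log-info events.
+ */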
+static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
+				 void *private_data,
+				 void *interrupt_data)
+{
+	union IpaHwErrorEventData_t evt;
+	u8 feature;
+
+	WARN_ON(private_data != ipa3_ctx);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	IPADBG("uC evt opcode=%u\n",
+		ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+
+	feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Invalid feature %u for event %u\n",
+			feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+	/* Feature specific handling */
+	if (ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr)
+		ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr
+			(ipa3_ctx->uc_ctx.uc_sram_mmio);
+
+	/* General handling */
+	if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+	    IPA_HW_2_CPU_EVENT_ERROR) {
+		evt.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
+		IPAERR("uC Error, evt errorType = %s\n",
+			ipa_hw_error_str(evt.params.errorType));
+		ipa3_ctx->uc_ctx.uc_failed = true;
+		ipa3_ctx->uc_ctx.uc_error_type = evt.params.errorType;
+		ipa3_ctx->uc_ctx.uc_error_timestamp =
+			ipahal_read_reg(IPA_TAG_TIMER);
+		BUG();
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_LOG_INFO) {
+		IPADBG("uC evt log info ofst=0x%x\n",
+			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
+		ipa3_log_evt_hdlr();
+	} else {
+		IPADBG("unsupported uC evt opcode=%u\n",
+				ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+}
+
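+/**
+ * ipa3_uc_panic_notifier() - notify the uC of an imminent kernel panic
+ *
+ * Sends the ERR_FATAL command to the uC so it can save its state before the
+ * system goes down. Clocks are grabbed without blocking and the notifier
+ * chain always continues (NOTIFY_DONE).
+ */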
+int ipa3_uc_panic_notifier(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	int result = 0;
+	struct ipa_active_client_logging_info log_info;
+
+	IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		goto fail;
+
+	IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+	if (ipa3_inc_client_enable_clks_no_block(&log_info))
+		goto fail;
+
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp =
+		IPA_CPU_2_HW_CMD_ERR_FATAL;
+	ipa3_ctx->uc_ctx.pending_cmd = ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp;
+	/* ensure write to shared memory is done before triggering uc */
+	wmb();
+
+	if (ipa3_ctx->apply_rg10_wa)
+		ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+			IPA_CPU_2_HW_CMD_MBOX_m,
+			IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
+	else
+		ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
+
+	/* give uc enough time to save state */
+	udelay(IPA_PKT_FLUSH_TO_US);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("err_fatal issued\n");
+
+fail:
+	return NOTIFY_DONE;
+}
+
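+/**
+ * ipa3_uc_response_hdlr() - handle responses from the uC (IPA_UC_IRQ_1)
+ *
+ * Gives the feature specific response handler a first chance; otherwise
+ * handles the common INIT_COMPLETED and CMD_COMPLETED responses and
+ * completes the pending command.
+ */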
+static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data)
+{
+	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	u8 feature;
+	int res;
+	int i;
+
+	WARN_ON(private_data != ipa3_ctx);
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	IPADBG("uC rsp opcode=%u\n",
+			ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+	feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Invalid feature %u for response %u\n",
+			feature, ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+
+	/* Feature specific handling */
+	if (ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr) {
+		res = ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr(
+			ipa3_ctx->uc_ctx.uc_sram_mmio,
+			&ipa3_ctx->uc_ctx.uc_status);
+		if (res == 0) {
+			IPADBG("feature %d specific response handler\n",
+				feature);
+			complete_all(&ipa3_ctx->uc_ctx.uc_completion);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return;
+		}
+	}
+
+	/* General handling */
+	if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+			IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) {
+		ipa3_ctx->uc_ctx.uc_loaded = true;
+
+		IPADBG("IPA uC loaded\n");
+		/*
+		 * The proxy vote is held until uC is loaded to ensure that
+		 * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received.
+		 */
+		ipa3_proxy_clk_unvote();
+
+		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+			if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
+				ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
+		}
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+		   IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+		uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams;
+		IPADBG("uC cmd response opcode=%u status=%u\n",
+		       uc_rsp.params.originalCmdOp,
+		       uc_rsp.params.status);
+		if (uc_rsp.params.originalCmdOp ==
+		    ipa3_ctx->uc_ctx.pending_cmd) {
+			ipa3_ctx->uc_ctx.uc_status = uc_rsp.params.status;
+			complete_all(&ipa3_ctx->uc_ctx.uc_completion);
+		} else {
+			IPAERR("Expected cmd=%u rcvd cmd=%u\n",
+			       ipa3_ctx->uc_ctx.pending_cmd,
+			       uc_rsp.params.originalCmdOp);
+		}
+	} else {
+		IPAERR("Unsupported uC rsp opcode = %u\n",
+		       ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
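+/**
+ * ipa3_uc_send_cmd_64b_param() - send a command with a 64bit parameter
+ *  to the uC
+ *
+ * Writes the command and its parameters to the shared SRAM area, triggers
+ * the uC (mailbox doorbell when the RG10 WA is active, EE interrupt
+ * otherwise) and waits for completion either by polling or by interrupt.
+ * GSI stop/empty failures are retried a bounded number of times.
+ */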
+static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
+	u32 expected_status, bool polling_mode, unsigned long timeout_jiffies)
+{
+	int index;
+	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	unsigned long flags;
+	int retries = 0;
+
+send_cmd_lock:
+	IPA3_UC_LOCK(flags);
+
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC send command aborted\n");
+		IPA3_UC_UNLOCK(flags);
+		return -EBADF;
+	}
+send_cmd:
+	if (ipa3_ctx->apply_rg10_wa) {
+		if (!polling_mode)
+			IPADBG("Overriding mode to polling mode\n");
+		polling_mode = true;
+	} else {
+		init_completion(&ipa3_ctx->uc_ctx.uc_completion);
+	}
+
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd_lo;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams_hi = cmd_hi;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode;
+	ipa3_ctx->uc_ctx.pending_cmd = opcode;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp = 0;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams = 0;
+
+	ipa3_ctx->uc_ctx.uc_status = 0;
+
+	/* ensure write to shared memory is done before triggering uc */
+	wmb();
+
+	if (ipa3_ctx->apply_rg10_wa)
+		ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+			IPA_CPU_2_HW_CMD_MBOX_m,
+			IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
+	else
+		ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
+
+	if (polling_mode) {
+		for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) {
+			if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+			    IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+				uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->
+						responseParams;
+				if (uc_rsp.params.originalCmdOp ==
+					ipa3_ctx->uc_ctx.pending_cmd) {
+					ipa3_ctx->uc_ctx.uc_status =
+						uc_rsp.params.status;
+					break;
+				}
+			}
+			if (ipa3_ctx->apply_rg10_wa)
+				udelay(IPA_UC_POLL_SLEEP_USEC);
+			else
+				usleep_range(IPA_UC_POLL_SLEEP_USEC,
+					IPA_UC_POLL_SLEEP_USEC);
+		}
+
+		if (index == IPA_UC_POLL_MAX_RETRY) {
+			IPAERR("uC max polling retries reached\n");
+			if (ipa3_ctx->uc_ctx.uc_failed) {
+				IPAERR("uC reported on Error, errorType = %s\n",
+					ipa_hw_error_str(ipa3_ctx->
+					uc_ctx.uc_error_type));
+			}
+			IPA3_UC_UNLOCK(flags);
+			BUG();
+			return -EFAULT;
+		}
+	} else {
+		if (wait_for_completion_timeout(&ipa3_ctx->uc_ctx.uc_completion,
+			timeout_jiffies) == 0) {
+			IPAERR("uC timed out\n");
+			if (ipa3_ctx->uc_ctx.uc_failed) {
+				IPAERR("uC reported on Error, errorType = %s\n",
+					ipa_hw_error_str(ipa3_ctx->
+					uc_ctx.uc_error_type));
+			}
+			IPA3_UC_UNLOCK(flags);
+			BUG();
+			return -EFAULT;
+		}
+	}
+
+	if (ipa3_ctx->uc_ctx.uc_status != expected_status) {
+		if (ipa3_ctx->uc_ctx.uc_status ==
+			IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE) {
+			retries++;
+			if (retries == IPA_GSI_CHANNEL_STOP_MAX_RETRY) {
+				IPAERR("Failed after %d tries\n", retries);
+				IPA3_UC_UNLOCK(flags);
+				BUG();
+				return -EFAULT;
+			}
+			IPA3_UC_UNLOCK(flags);
+			ipa3_inject_dma_task_for_gsi();
+			/* sleep for short period to flush IPA */
+			usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+				IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+			goto send_cmd_lock;
+		}
+
+		if (ipa3_ctx->uc_ctx.uc_status ==
+			IPA_HW_GSI_CH_NOT_EMPTY_FAILURE) {
+			retries++;
+			if (retries >= IPA_GSI_CHANNEL_EMPTY_MAX_RETRY) {
+				IPAERR("Failed after %d tries\n", retries);
+				IPA3_UC_UNLOCK(flags);
+				return -EFAULT;
+			}
+			if (ipa3_ctx->apply_rg10_wa)
+				udelay(
+				IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC / 2 +
+				IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC / 2);
+			else
+				usleep_range(
+				IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC,
+				IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC);
+			goto send_cmd;
+		}
+
+		IPAERR("Received status %u, Expected status %u\n",
+			ipa3_ctx->uc_ctx.uc_status, expected_status);
+		IPA3_UC_UNLOCK(flags);
+		return -EFAULT;
+	}
+
+	IPA3_UC_UNLOCK(flags);
+
+	IPADBG("uC cmd %u send succeeded\n", opcode);
+
+	return 0;
+}
+
+/**
+ * ipa3_uc_interface_init() - Initialize the interface with the uC
+ *
+ * Return value: 0 on success, negative value otherwise
+ */
+int ipa3_uc_interface_init(void)
+{
+	int result;
+	unsigned long phys_addr;
+
+	if (ipa3_ctx->uc_ctx.uc_inited) {
+		IPADBG("uC interface already initialized\n");
+		return 0;
+	}
+
+	mutex_init(&ipa3_ctx->uc_ctx.uc_lock);
+	spin_lock_init(&ipa3_ctx->uc_ctx.uc_spinlock);
+
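+	/*
+	 * map the first IPA_RAM_UC_SMEM_SIZE bytes of IPA SRAM, used as the
+	 * CPU <-> uC shared command/response area
+	 */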
+	phys_addr = ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0);
+	ipa3_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr,
+					       IPA_RAM_UC_SMEM_SIZE);
+	if (!ipa3_ctx->uc_ctx.uc_sram_mmio) {
+		IPAERR("Fail to ioremap IPA uC SRAM\n");
+		result = -ENOMEM;
+		goto remap_fail;
+	}
+
+	if (!ipa3_ctx->apply_rg10_wa) {
+		result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
+			ipa3_uc_event_handler, true,
+			ipa3_ctx);
+		if (result) {
+			IPAERR("Fail to register for UC_IRQ0 rsp interrupt\n");
+			result = -EFAULT;
+			goto irq_fail0;
+		}
+
+		result = ipa3_add_interrupt_handler(IPA_UC_IRQ_1,
+			ipa3_uc_response_hdlr, true,
+			ipa3_ctx);
+		if (result) {
+			IPAERR("fail to register for UC_IRQ1 rsp interrupt\n");
+			result = -EFAULT;
+			goto irq_fail1;
+		}
+	}
+
+	ipa3_ctx->uc_ctx.uc_inited = true;
+
+	IPADBG("IPA uC interface is initialized\n");
+	return 0;
+
+irq_fail1:
+	ipa3_remove_interrupt_handler(IPA_UC_IRQ_0);
+irq_fail0:
+	iounmap(ipa3_ctx->uc_ctx.uc_sram_mmio);
+remap_fail:
+	return result;
+}
+
+/**
+ * ipa3_uc_load_notify() - Notification about uC loading
+ *
+ * This function should be called when the IPA uC interface layer cannot
+ * determine by itself that the uC has loaded and instead waits for an
+ * external notification. An example is the resource group 10 limitation,
+ * where the ipa driver does not get uC interrupts.
+ * The function should perform actions that were not done at init due to uC
+ * not being loaded then.
+ */
+void ipa3_uc_load_notify(void)
+{
+	int i;
+	int result;
+
+	if (!ipa3_ctx->apply_rg10_wa)
+		return;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa3_ctx->uc_ctx.uc_loaded = true;
+	IPADBG("IPA uC loaded\n");
+
+	ipa3_proxy_clk_unvote();
+
+	ipa3_init_interrupts();
+
+	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
+		ipa3_uc_event_handler, true,
+		ipa3_ctx);
+	if (result)
+		IPAERR("Fail to register for UC_IRQ0 rsp interrupt.\n");
+
+	for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+		if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
+			ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+EXPORT_SYMBOL(ipa3_uc_load_notify);
+
+/**
+ * ipa3_uc_send_cmd() - Send a command to the uC
+ *
+ * Note1: This function sends a command with a 32bit parameter and does not
+ *	use the higher 32bit of the command parameter (set to zero).
+ *
+ * Note2: In case the operation times out (no response from the uC) or
+ *       the maximal number of polling retries is reached, the logic
+ *       considers it an invalid state of the uC/IPA, and
+ *       issues a kernel panic.
+ *
+ * Returns: 0 on success.
+ *          -EINVAL in case of invalid input.
+ *          -EBADF in case uC interface is not initialized /
+ *                 or the uC has failed previously.
+ *          -EFAULT in case the received status doesn't match
+ *                  the expected.
+ */
+int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+		    bool polling_mode, unsigned long timeout_jiffies)
+{
+	return ipa3_uc_send_cmd_64b_param(cmd, 0, opcode,
+		expected_status, polling_mode, timeout_jiffies);
+}
+
+/**
+ * ipa3_uc_register_handlers() - Registers event, response and log event
+ *                              handlers for a specific feature. Please note
+ *                              that currently only one handler can be
+ *                              registered per feature.
+ *
+ * Return value: None
+ */
+void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
+			      struct ipa3_uc_hdlrs *hdlrs)
+{
+	unsigned long flags;
+
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Feature %u is invalid, not registering hdlrs\n",
+		       feature);
+		return;
+	}
+
+	IPA3_UC_LOCK(flags);
+	ipa3_uc_hdlrs[feature] = *hdlrs;
+	IPA3_UC_UNLOCK(flags);
+
+	IPADBG("uC handlers registered for feature %u\n", feature);
+}
+
+/**
+ * ipa3_uc_reset_pipe() - reset a BAM pipe using the uC interface
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * The function uses the uC interface in order to issue a BAM
+ * PIPE reset request. The uC makes sure there's no traffic in
+ * the TX command queue before issuing the reset.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_uc_reset_pipe(enum ipa_client_type ipa_client)
+{
+	union IpaHwResetPipeCmdData_t cmd;
+	int ep_idx;
+	int ret;
+
+	ep_idx = ipa3_get_ep_mapping(ipa_client);
+	if (ep_idx == -1) {
+		IPAERR("Invalid IPA client\n");
+		return 0;
+	}
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * continue with the sequence without resetting the
+	 * pipe.
+	 */
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC interface will not be used to reset %s pipe %d\n",
+		       IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
+		       ep_idx);
+		return 0;
+	}
+
+	/*
+	 * IPA consumer = 0, IPA producer = 1.
+	 * IPA driver concept of PROD/CONS is the opposite of the
+	 * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER,
+	 * and vice-versa.
+	 */
+	cmd.params.direction = (u8)(IPA_CLIENT_IS_PROD(ipa_client) ? 0 : 1);
+	cmd.params.pipeNum = (u8)ep_idx;
+
+	IPADBG("uC pipe reset on IPA %s pipe %d\n",
+	       IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx);
+
+	ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0,
+			      false, 10*HZ);
+
+	return ret;
+}
+
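+/**
+ * ipa3_uc_is_gsi_channel_empty() - use the uC to check if a GSI channel
+ *  is empty
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * Returns: 0 on success, negative on failure
+ */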
+int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client)
+{
+	struct ipa_gsi_ep_config *gsi_ep_info;
+	union IpaHwChkChEmptyCmdData_t cmd;
+	int ret;
+
+	gsi_ep_info = ipa3_get_gsi_ep_info(ipa3_get_ep_mapping(ipa_client));
+	if (!gsi_ep_info) {
+		IPAERR("Invalid IPA ep index\n");
+		return 0;
+	}
+
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC cannot be used to validate ch emptiness clnt=%d\n"
+			, ipa_client);
+		return 0;
+	}
+
+	cmd.params.ee_n = gsi_ep_info->ee;
+	cmd.params.vir_ch_id = gsi_ep_info->ipa_gsi_chan_num;
+
+	IPADBG("uC emptiness check for IPA GSI Channel %d\n",
+	       gsi_ep_info->ipa_gsi_chan_num);
+
+	ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_GSI_CH_EMPTY, 0,
+			      false, 10*HZ);
+
+	return ret;
+}
+
+
+/**
+ * ipa3_uc_notify_clk_state() - notify the uC of clock enable / disable
+ * @enabled: true if clocks are enabled
+ *
+ * The function uses the uC interface in order to notify the uC before IPA
+ * clocks are disabled, to make sure the uC is not in the middle of an
+ * operation. Also, after clocks are enabled, the uC needs to be notified to
+ * start processing.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_notify_clk_state(bool enabled)
+{
+	u32 opcode;
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * don't notify the uC on the enable/disable
+	 */
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC interface will not notify the UC on clock state\n");
+		return 0;
+	}
+
+	IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE");
+
+	opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE :
+			     IPA_CPU_2_HW_CMD_CLK_GATE;
+
+	return ipa3_uc_send_cmd(0, opcode, 0, true, 0);
+}
+
+/**
+ * ipa3_uc_update_hw_flags() - send uC the HW flags to be used
+ * @flags: This field is expected to be used as bitmask for enum ipa3_hw_flags
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_update_hw_flags(u32 flags)
+{
+	union IpaHwUpdateFlagsCmdData_t cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.newFlags = flags;
+	return ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0,
+		false, HZ);
+}
+
+/**
+ * ipa3_uc_rg10_write_reg() - write to register possibly via uC
+ *
+ * If the RG10 limitation workaround is enabled, then writing
+ * to a register will be proxied by the uC due to a H/W limitation.
+ * This func should be called for RG10 registers only.
+ *
+ * @Parameters: Like ipahal_write_reg_n() parameters
+ *
+ */
+void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val)
+{
+	int ret;
+	u32 paddr;
+
+	if (!ipa3_ctx->apply_rg10_wa)
+		return ipahal_write_reg_n(reg, n, val);
+
+
+	/* calculate register physical address */
+	paddr = ipa3_ctx->ipa_wrapper_base + ipa3_ctx->ctrl->ipa_reg_base_ofst;
+	paddr += ipahal_get_reg_n_ofst(reg, n);
+
+	IPADBG("Sending uC cmd to reg write: addr=0x%x val=0x%x\n",
+		paddr, val);
+	ret = ipa3_uc_send_cmd_64b_param(paddr, val,
+		IPA_CPU_2_HW_CMD_REG_WRITE, 0, true, 0);
+	if (ret) {
+		IPAERR("failed to send cmd to uC for reg write\n");
+		BUG();
+	}
+}
+
+/**
+ * ipa3_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int res;
+	struct ipa_mem_buffer mem;
+	struct IpaHwMemCopyData_t *cmd;
+
+	IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len);
+	mem.size = sizeof(*cmd);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	cmd = (struct IpaHwMemCopyData_t *)mem.base;
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->destination_addr = dest;
+	cmd->dest_buffer_size = len;
+	cmd->source_addr = src;
+	cmd->source_buffer_size = len;
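+	/*
+	 * the physical address of this descriptor is passed to the uC as the
+	 * 32b command parameter
+	 */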
+	res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0,
+		true, 10 * HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto free_coherent;
+	}
+
+	res = 0;
+free_coherent:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return res;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
new file mode 100644
index 0000000..7949d91
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
@@ -0,0 +1,962 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/* MHI uC interface definitions */
+#define IPA_HW_INTERFACE_MHI_VERSION            0x0004
+
+#define IPA_HW_MAX_NUMBER_OF_CHANNELS	2
+#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS	2
+#define IPA_HW_MAX_CHANNEL_HANDLE	(IPA_HW_MAX_NUMBER_OF_CHANNELS-1)
+
+/**
+ * Values that represent the MHI commands from CPU to IPA HW.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing.
+ *	Once operation was completed HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready
+ *	to serve MHI transfers. Once initialization was completed HW shall
+ *	respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE,
+ *	indicating IPA_HW_MHI_CHANNEL_STATE_ENABLE.
+ * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data.
+ *	Once operation was completed HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel
+ *	processing state following host request. Once operation was completed
+ *	HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE.
+ * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL synchronization.
+ * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing.
+ */
+enum ipa_cpu_2_hw_mhi_commands {
+	IPA_CPU_2_HW_CMD_MHI_INIT
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+	IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+	IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3),
+	IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+	IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5)
+};
+
+/**
+ * Values that represent MHI related HW responses to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to
+ *	IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or
+ *	IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands.
+ */
+enum ipa_hw_2_cpu_mhi_responses {
+	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+};
+
+/**
+ * Values that represent MHI related HW events to be sent to the CPU.
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specifying that the device
+ *	detected an error in an element from the transfer ring associated
+ *	with the channel
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specifying that a
+ *	bam interrupt was asserted while the MHI engine is suspended
+ */
+enum ipa_hw_2_cpu_mhi_events {
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+};
+
+/**
+ * Channel error types.
+ * @IPA_HW_CHANNEL_ERROR_NONE: No error persists.
+ * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected
+ */
+enum ipa_hw_channel_errors {
+	IPA_HW_CHANNEL_ERROR_NONE,
+	IPA_HW_CHANNEL_INVALID_RE_ERROR
+};
+
+/**
+ * MHI error types.
+ * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space
+ * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array
+ * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array
+ * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on
+ *	secondary event ring
+ * @IPA_HW_LINK_ERROR: Link error
+ */
+enum ipa_hw_mhi_errors {
+	IPA_HW_INVALID_MMIO_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_INVALID_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+	IPA_HW_INVALID_EVENT_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+	IPA_HW_NO_ED_IN_RING_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+	IPA_HW_LINK_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5),
+};
+
+
+/**
+ * Structure referring to the common and MHI section of 128B shared memory
+ * located at offset zero of the SW partition in IPA SRAM.
+ * The shared memory is used for communication between IPA HW and CPU.
+ * @common: common section in IPA SRAM
+ * @interfaceVersionMhi: The MHI interface version as reported by HW
+ * @mhiState: Overall MHI state
+ * @reserved_2B: reserved
+ * @mhiCnl0State: State of MHI channel 0.
+ *	The state carries information regarding the error type.
+ *	See IPA_HW_MHI_CHANNEL_STATES.
+ * @mhiCnl1State: State of MHI channel 1.
+ * @mhiCnl2State: State of MHI channel 2.
+ * @mhiCnl3State: State of MHI channel 3.
+ * @mhiCnl4State: State of MHI channel 4.
+ * @mhiCnl5State: State of MHI channel 5.
+ * @mhiCnl6State: State of MHI channel 6.
+ * @mhiCnl7State: State of MHI channel 7.
+ * @reserved_37_34: reserved
+ * @reserved_3B_38: reserved
+ * @reserved_3F_3C: reserved
+ */
+struct IpaHwSharedMemMhiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u16 interfaceVersionMhi;
+	u8 mhiState;
+	u8 reserved_2B;
+	u8 mhiCnl0State;
+	u8 mhiCnl1State;
+	u8 mhiCnl2State;
+	u8 mhiCnl3State;
+	u8 mhiCnl4State;
+	u8 mhiCnl5State;
+	u8 mhiCnl6State;
+	u8 mhiCnl7State;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+};
+
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command.
+ * Parameters are sent as a pointer and thus should reside at an address
+ * accessible to HW.
+ * @msiAddress: The MSI base (in device space) used for asserting the interrupt
+ *	(MSI) associated with the event ring
+ * @mmioBaseAddress: The address (in device space) of MMIO structure in
+ *	host space
+ * @deviceMhiCtrlBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI control data structures are allocated by
+ *	the host, including channel context array, event context array,
+ *	and rings. This value is used for host/device address translation.
+ * @deviceMhiDataBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI data buffers are allocated by the host.
+ *	This value is used for host/device address translation.
+ * @firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel
+ * @firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this
+ *	event ring.
+ */
+struct IpaHwMhiInitCmdData_t {
+	u32 msiAddress;
+	u32 mmioBaseAddress;
+	u32 deviceMhiCtrlBaseAddress;
+	u32 deviceMhiDataBaseAddress;
+	u32 firstChannelIndex;
+	u32 firstEventRingIndex;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+ *	command. Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is
+ *	used as an index in channel context array structures.
+ * @bamPipeId: The BAM pipe number for pipe dedicated for this channel
+ * @channelDirection: The direction of the channel as defined in the channel
+ *	type field (CHTYPE) in the channel context data structure.
+ * @reserved: reserved.
+ */
+union IpaHwMhiInitChannelCmdData_t {
+	struct IpaHwMhiInitChannelCmdParams_t {
+		u32 channelHandle:8;
+		u32 contexArrayIndex:8;
+		u32 bamPipeId:6;
+		u32 channelDirection:2;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command.
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwMhiMsiCmdData_t {
+	u32 msiAddress_low;
+	u32 msiAddress_hi;
+	u32 msiMask;
+	u32 msiData;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @requestedState: The requested channel state as was indicated from Host.
+ *	Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @LPTransitionRejected: Indication that low power state transition was
+ *	rejected
+ * @reserved: reserved
+ */
+union IpaHwMhiChangeChannelStateCmdData_t {
+	struct IpaHwMhiChangeChannelStateCmdParams_t {
+		u32 requestedState:8;
+		u32 channelHandle:8;
+		u32 LPTransitionRejected:8;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiStopEventUpdateData_t {
+	struct IpaHwMhiStopEventUpdateDataParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response.
+ * Parameters are sent as 32b immediate parameters.
+ * @state: The new channel state. In case state is not as requested this is
+ *	error indication for the last command
+ * @channelHandle: The channel identifier
+ * @additonalParams: For stop: the number of pending bam descriptors currently
+ *	queued
+ */
+union IpaHwMhiChangeChannelStateResponseData_t {
+	struct IpaHwMhiChangeChannelStateResponseParams_t {
+		u32 state:8;
+		u32 channelHandle:8;
+		u32 additonalParams:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event.
+ * Parameters are sent as 32b immediate parameters.
+ * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelErrorEventData_t {
+	struct IpaHwMhiChannelErrorEventParams_t {
+		u32 errorType:8;
+		u32 channelHandle:8;
+		u32 reserved:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelWakeupEventData_t {
+	struct IpaHwMhiChannelWakeupEventParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the MHI Common statistics
+ * @numULDLSync: Number of times UL activity triggered due to DL activity
+ * @numULTimerExpired: Number of times UL Accm Timer expired
+ */
+struct IpaHwStatsMhiCmnInfoData_t {
+	u32 numULDLSync;
+	u32 numULTimerExpired;
+	u32 numChEvCtxWpRead;
+	u32 reserved;
+};
+
+/**
+ * Structure holding the MHI Channel statistics
+ * @doorbellInt: The number of doorbell int
+ * @reProccesed: The number of ring elements processed
+ * @bamFifoFull: Number of times Bam Fifo got full
+ * @bamFifoEmpty: Number of times Bam Fifo got empty
+ * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75%
+ * @bamFifoUsageLow: Number of times Bam fifo usage went below 25%
+ * @bamInt: Number of BAM Interrupts
+ * @ringFull: Number of times Transfer Ring got full
+ * @ringEmpty: Number of times Transfer Ring got empty
+ * @ringUsageHigh: Number of times Transfer Ring usage went above 75%
+ * @ringUsageLow: Number of times Transfer Ring usage went below 25%
+ * @delayedMsi: Number of times device triggered MSI to host after
+ *	Interrupt Moderation Timer expiry
+ * @immediateMsi: Number of times device triggered MSI to host immediately
+ * @thresholdMsi: Number of times device triggered MSI due to max pending
+ *	events threshold reached
+ * @numSuspend: Number of times channel was suspended
+ * @numResume: Number of times channel was resumed
+ * @num_OOB: Number of times we indicated that we are OOB
+ * @num_OOB_timer_expiry: Number of times we indicated that we are OOB
+ *	after timer expiry
+ * @num_OOB_moderation_timer_start: Number of times we started timer after
+ *	sending OOB and hitting OOB again before we processed threshold
+ *	number of packets
+ * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode
+ */
+struct IpaHwStatsMhiCnlInfoData_t {
+	u32 doorbellInt;
+	u32 reProccesed;
+	u32 bamFifoFull;
+	u32 bamFifoEmpty;
+	u32 bamFifoUsageHigh;
+	u32 bamFifoUsageLow;
+	u32 bamInt;
+	u32 ringFull;
+	u32 ringEmpty;
+	u32 ringUsageHigh;
+	u32 ringUsageLow;
+	u32 delayedMsi;
+	u32 immediateMsi;
+	u32 thresholdMsi;
+	u32 numSuspend;
+	u32 numResume;
+	u32 num_OOB;
+	u32 num_OOB_timer_expiry;
+	u32 num_OOB_moderation_timer_start;
+	u32 num_db_mode_evt;
+};
+
+/**
+ * Structure holding the MHI statistics
+ * @mhiCmnStats: Stats pertaining to MHI
+ * @mhiCnlStats: Stats pertaining to each channel
+ */
+struct IpaHwStatsMhiInfoData_t {
+	struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats;
+	struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[
+						IPA_HW_MAX_NUMBER_OF_CHANNELS];
+};
+
+/**
+ * Structure holding the MHI Common Config info
+ * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled
+ * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is
+ *	enabled
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+struct IpaHwConfigMhiCmnInfoData_t {
+	u8 isDlUlSyncEnabled;
+	u8 UlAccmVal;
+	u8 ulMsiEventThreshold;
+	u8 dlMsiEventThreshold;
+};
+
+/**
+ * Structure holding the parameters for MSI info data
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwConfigMhiMsiInfoData_t {
+	u32 msiAddress_low;
+	u32 msiAddress_hi;
+	u32 msiMask;
+	u32 msiData;
+};
+
+/**
+ * Structure holding the MHI Channel Config info
+ * @transferRingSize: The Transfer Ring size in terms of Ring Elements
+ * @transferRingIndex: The Transfer Ring channel number as defined by host
+ * @eventRingIndex: The Event Ring Index associated with this Transfer Ring
+ * @bamPipeIndex: The BAM Pipe associated with this channel
+ * @isOutChannel: Indication for the direction of channel
+ * @reserved_0: Reserved byte for maintaining 4byte alignment
+ * @reserved_1: Reserved byte for maintaining 4byte alignment
+ */
+struct IpaHwConfigMhiCnlInfoData_t {
+	u16 transferRingSize;
+	u8  transferRingIndex;
+	u8  eventRingIndex;
+	u8  bamPipeIndex;
+	u8  isOutChannel;
+	u8  reserved_0;
+	u8  reserved_1;
+};
+
+/**
+ * Structure holding the MHI Event Config info
+ * @msiVec: msi vector to invoke MSI interrupt
+ * @intmodtValue: Interrupt moderation timer (in milliseconds)
+ * @eventRingSize: The Event Ring size in terms of Ring Elements
+ * @eventRingIndex: The Event Ring number as defined by host
+ * @reserved_0: Reserved byte for maintaining 4byte alignment
+ * @reserved_1: Reserved byte for maintaining 4byte alignment
+ * @reserved_2: Reserved byte for maintaining 4byte alignment
+ */
+struct IpaHwConfigMhiEventInfoData_t {
+	u32 msiVec;
+	u16 intmodtValue;
+	u16 eventRingSize;
+	u8  eventRingIndex;
+	u8  reserved_0;
+	u8  reserved_1;
+	u8  reserved_2;
+};
+
+/**
+ * Structure holding the MHI Config info
+ * @mhiCmnCfg: Common Config pertaining to MHI
+ * @mhiMsiCfg: Config pertaining to MSI config
+ * @mhiCnlCfg: Config pertaining to each channel
+ * @mhiEvtCfg: Config pertaining to each event Ring
+ */
+struct IpaHwConfigMhiInfoData_t {
+	struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg;
+	struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg;
+	struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[
+						IPA_HW_MAX_NUMBER_OF_CHANNELS];
+	struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[
+					IPA_HW_MAX_NUMBER_OF_EVENTRINGS];
+};
+
+
+struct ipa3_uc_mhi_ctx {
+	u8 expected_responseOp;
+	u32 expected_responseParams;
+	void (*ready_cb)(void);
+	void (*wakeup_request_cb)(void);
+	u32 mhi_uc_stats_ofst;
+	struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio;
+};
+
+#define PRINT_COMMON_STATS(x) \
+	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+	#x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x))
+
+#define PRINT_CHANNEL_STATS(ch, x) \
+	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+	#x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x))
+
+struct ipa3_uc_mhi_ctx *ipa3_uc_mhi_ctx;
+
+static int ipa3_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio, u32 *uc_status)
+{
+	IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp);
+	if (uc_sram_mmio->responseOp == ipa3_uc_mhi_ctx->expected_responseOp &&
+	    uc_sram_mmio->responseParams ==
+	    ipa3_uc_mhi_ctx->expected_responseParams) {
+		*uc_status = 0;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void ipa3_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio)
+{
+	if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+	    IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
+		union IpaHwMhiChannelErrorEventData_t evt;
+
+		IPAERR("Channel error\n");
+		evt.raw32b = uc_sram_mmio->eventParams;
+		IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
+			evt.params.errorType, evt.params.channelHandle,
+			evt.params.reserved);
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		   IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
+		union IpaHwMhiChannelWakeupEventData_t evt;
+
+		IPADBG("WakeUp channel request\n");
+		evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("channelHandle=%d reserved=%d\n",
+			evt.params.channelHandle, evt.params.reserved);
+		ipa3_uc_mhi_ctx->wakeup_request_cb();
+	}
+}
+
+static void ipa3_uc_mhi_event_log_info_hdlr(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) {
+		IPAERR("MHI feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].
+		params.size != sizeof(struct IpaHwStatsMhiInfoData_t)) {
+		IPAERR("mhi stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsMhiInfoData_t),
+			uc_event_top_mmio->statsInfo.
+			featureInfo[IPA_HW_FEATURE_MHI].params.size);
+		return;
+	}
+
+	ipa3_uc_mhi_ctx->mhi_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_MHI].params.offset;
+	IPAERR("MHI stats ofst=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
+	if (ipa3_uc_mhi_ctx->mhi_uc_stats_ofst +
+		sizeof(struct IpaHwStatsMhiInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
+			ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_uc_mhi_ctx->mhi_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_uc_mhi_ctx->mhi_uc_stats_ofst,
+		sizeof(struct IpaHwStatsMhiInfoData_t));
+	if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc mhi stats\n");
+		return;
+	}
+}
+
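+/**
+ * ipa3_uc_mhi_init() - Allocate the uC MHI context and register the MHI
+ *	handlers with the uC interface
+ * @ready_cb: called once the uC is loaded
+ * @wakeup_request_cb: called upon an MHI channel wake up request event
+ *
+ * Returns: 0 on success, negative on failure
+ */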
+int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
+{
+	struct ipa3_uc_hdlrs hdlrs;
+
+	if (ipa3_uc_mhi_ctx) {
+		IPAERR("Already initialized\n");
+		return -EFAULT;
+	}
+
+	ipa3_uc_mhi_ctx = kzalloc(sizeof(*ipa3_uc_mhi_ctx), GFP_KERNEL);
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+
+	ipa3_uc_mhi_ctx->ready_cb = ready_cb;
+	ipa3_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;
+
+	memset(&hdlrs, 0, sizeof(hdlrs));
+	hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_mhi_ctx->ready_cb;
+	hdlrs.ipa3_uc_response_hdlr = ipa3_uc_mhi_response_hdlr;
+	hdlrs.ipa_uc_event_hdlr = ipa3_uc_mhi_event_hdlr;
+	hdlrs.ipa_uc_event_log_info_hdlr = ipa3_uc_mhi_event_log_info_hdlr;
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
+
+	IPADBG("Done\n");
+	return 0;
+}
+
+void ipa3_uc_mhi_cleanup(void)
+{
+	struct ipa3_uc_hdlrs null_hdlrs = { 0 };
+
+	IPADBG("Enter\n");
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("ipa3_uc_mhi_ctx is not initialized\n");
+		return;
+	}
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs);
+	kfree(ipa3_uc_mhi_ctx);
+	ipa3_uc_mhi_ctx = NULL;
+
+	IPADBG("Done\n");
+}
+
+int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+	u32 first_evt_idx)
+{
+	int res;
+	struct ipa_mem_buffer mem;
+	struct IpaHwMhiInitCmdData_t *init_cmd_data;
+	struct IpaHwMhiMsiCmdData_t *msi_cmd;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_update_hw_flags(0);
+	if (res) {
+		IPAERR("ipa3_uc_update_hw_flags failed %d\n", res);
+		goto disable_clks;
+	}
+
+	mem.size = sizeof(*init_cmd_data);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		res = -ENOMEM;
+		goto disable_clks;
+	}
+	memset(mem.base, 0, mem.size);
+	init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
+	init_cmd_data->msiAddress = msi->addr_low;
+	init_cmd_data->mmioBaseAddress = mmio_addr;
+	init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
+	init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
+	init_cmd_data->firstChannelIndex = first_ch_idx;
+	init_cmd_data->firstEventRingIndex = first_evt_idx;
+	res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0,
+		false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		goto disable_clks;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	mem.size = sizeof(*msi_cmd);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		res = -ENOMEM;
+		goto disable_clks;
+	}
+
+	msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
+	msi_cmd->msiAddress_hi = msi->addr_hi;
+	msi_cmd->msiAddress_low = msi->addr_low;
+	msi_cmd->msiData = msi->data;
+	msi_cmd->msiMask = msi->mask;
+	res = ipa3_uc_send_cmd((u32)mem.phys_base,
+		IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		goto disable_clks;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+
+}
+
+int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+	int contexArrayIndex, int channelDirection)
+
+{
+	int res;
+	union IpaHwMhiInitChannelCmdData_t init_cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (ipa_ep_idx < 0  || ipa_ep_idx >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("Invalid ipa_ep_idx.\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&init_cmd, 0, sizeof(init_cmd));
+	init_cmd.params.channelHandle = channelHandle;
+	init_cmd.params.contexArrayIndex = contexArrayIndex;
+	init_cmd.params.bamPipeId = ipa_ep_idx;
+	init_cmd.params.channelDirection = channelDirection;
+
+	res = ipa3_uc_send_cmd(init_cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+
+int ipa3_uc_mhi_reset_channel(int channelHandle)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+	cmd.params.channelHandle = channelHandle;
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_suspend_channel(int channelHandle)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	cmd.params.channelHandle = channelHandle;
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	cmd.params.channelHandle = channelHandle;
+	cmd.params.LPTransitionRejected = LPTransitionRejected;
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+	union IpaHwMhiStopEventUpdateData_t cmd;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.channelHandle = channelHandle;
+
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = cmd.raw32b;
+
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
+{
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
+		cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal);
+	IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
+		cmd->params.ulMsiEventThreshold,
+		cmd->params.dlMsiEventThreshold);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_send_cmd(cmd->raw32b,
+		IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
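+/**
+ * ipa3_uc_mhi_print_stats() - Print the uC MHI statistics into a buffer
+ * @dbg_buff: destination buffer
+ * @size: size of the destination buffer in bytes
+ *
+ * Returns: number of bytes written to @dbg_buff
+ */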
+int ipa3_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+	int nBytes = 0;
+	int i;
+
+	if (!ipa3_uc_mhi_ctx || !ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
+		IPAERR("MHI uc stats is not valid\n");
+		return 0;
+	}
+
+	nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+		"Common Stats:\n");
+	PRINT_COMMON_STATS(numULDLSync);
+	PRINT_COMMON_STATS(numULTimerExpired);
+	PRINT_COMMON_STATS(numChEvCtxWpRead);
+
+	for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) {
+		nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+			"Channel %d Stats:\n", i);
+		PRINT_CHANNEL_STATS(i, doorbellInt);
+		PRINT_CHANNEL_STATS(i, reProccesed);
+		PRINT_CHANNEL_STATS(i, bamFifoFull);
+		PRINT_CHANNEL_STATS(i, bamFifoEmpty);
+		PRINT_CHANNEL_STATS(i, bamFifoUsageHigh);
+		PRINT_CHANNEL_STATS(i, bamFifoUsageLow);
+		PRINT_CHANNEL_STATS(i, bamInt);
+		PRINT_CHANNEL_STATS(i, ringFull);
+		PRINT_CHANNEL_STATS(i, ringEmpty);
+		PRINT_CHANNEL_STATS(i, ringUsageHigh);
+		PRINT_CHANNEL_STATS(i, ringUsageLow);
+		PRINT_CHANNEL_STATS(i, delayedMsi);
+		PRINT_CHANNEL_STATS(i, immediateMsi);
+		PRINT_CHANNEL_STATS(i, thresholdMsi);
+		PRINT_CHANNEL_STATS(i, numSuspend);
+		PRINT_CHANNEL_STATS(i, numResume);
+		PRINT_CHANNEL_STATS(i, num_OOB);
+		PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry);
+		PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start);
+		PRINT_CHANNEL_STATS(i, num_db_mode_evt);
+	}
+
+	return nBytes;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
new file mode 100644
index 0000000..7b89184
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
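+
+/*
+ * Fixed uC doorbell physical addresses; they are reported back to the
+ * offload client in ipa_ntn_conn_out_params (ul_uc_db_pa gets the RX
+ * doorbell, dl_uc_db_pa gets the TX doorbell).
+ */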
+
+static void ipa3_uc_ntn_event_handler(struct IpaHwSharedMemCommonMapping_t
+				     *uc_sram_mmio)
+
+{
+	union Ipa3HwNTNErrorEventData_t ntn_evt;
+
+	if (uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+		ntn_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+			   ntn_evt.params.ntn_error_type,
+			   ntn_evt.params.ipa_pipe_number,
+			   ntn_evt.params.ntn_ch_err_type);
+	}
+}
+
+static void ipa3_uc_ntn_event_log_info_handler(
+struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+		IPAERR("NTN feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+		params.size != sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
+		IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+			   sizeof(struct Ipa3HwStatsNTNInfoData_t),
+			   uc_event_top_mmio->statsInfo.
+			   featureInfo[IPA_HW_FEATURE_NTN].params.size);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+	IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+	if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+		sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+			   ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+		sizeof(struct Ipa3HwStatsNTNInfoData_t));
+	if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc ntn stats\n");
+		return;
+	}
+}
+
+/**
+ * ipa3_get_ntn_stats() - Query NTN statistics from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("bad parms stats=%p ntn_stats=%p\n",
+			stats,
+			ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(tail_ptr_val);
+	TX_STATS(num_db_fired);
+	TX_STATS(tx_comp_ring_stats.ringFull);
+	TX_STATS(tx_comp_ring_stats.ringEmpty);
+	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+	TX_STATS(tx_comp_ring_stats.ringUsageLow);
+	TX_STATS(tx_comp_ring_stats.RingUtilCount);
+	TX_STATS(bam_stats.bamFifoFull);
+	TX_STATS(bam_stats.bamFifoEmpty);
+	TX_STATS(bam_stats.bamFifoUsageHigh);
+	TX_STATS(bam_stats.bamFifoUsageLow);
+	TX_STATS(bam_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_unexpected_db);
+	TX_STATS(num_bam_int_handled);
+	TX_STATS(num_bam_int_in_non_running_state);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(num_bam_int_handled_while_wait_for_bam);
+	TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+	RX_STATS(max_outstanding_pkts);
+	RX_STATS(num_pkts_processed);
+	RX_STATS(rx_ring_rp_value);
+	RX_STATS(rx_ind_ring_stats.ringFull);
+	RX_STATS(rx_ind_ring_stats.ringEmpty);
+	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+	RX_STATS(rx_ind_ring_stats.ringUsageLow);
+	RX_STATS(rx_ind_ring_stats.RingUtilCount);
+	RX_STATS(bam_stats.bamFifoFull);
+	RX_STATS(bam_stats.bamFifoEmpty);
+	RX_STATS(bam_stats.bamFifoUsageHigh);
+	RX_STATS(bam_stats.bamFifoUsageLow);
+	RX_STATS(bam_stats.bamUtilCount);
+	RX_STATS(num_bam_int_handled);
+	RX_STATS(num_db);
+	RX_STATS(num_unexpected_db);
+	RX_STATS(num_pkts_in_dis_uninit_state);
+	RX_STATS(num_bam_int_handled_while_not_in_bam);
+	RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+int ipa3_ntn_init(void)
+{
+	struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
+
+	uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler;
+	uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+		ipa3_uc_ntn_event_log_info_handler;
+
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+	return 0;
+}
+
+static int ipa3_uc_send_ntn_setup_pipe_cmd(
+	struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+	int ipa_ep_idx;
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct Ipa3HwNtnSetUpCmdData_t *Ntn_params;
+	struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+	if (ntn_info == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to get ep idx.\n");
+		return -EFAULT;
+	}
+
+	IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+	IPADBG("ring_base_pa = 0x%pa\n",
+			&ntn_info->ring_base_pa);
+	IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+	IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+	IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+	IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+	IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+	Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+	Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+	Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+	Ntn_params->num_buffers = ntn_info->num_buffers;
+	Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+	Ntn_params->data_buff_size = ntn_info->data_buff_size;
+	Ntn_params->ipa_pipe_number = ipa_ep_idx;
+	Ntn_params->dir = dir;
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result)
+		result = -EFAULT;
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+/**
+ * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes
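+ * @in: NTN connect parameters for the UL and DL pipes
+ * @notify: client notification callback stored on the UL endpoint
+ * @priv: private data passed back through the notification callback
+ * @hdr_len: header length configured on both pipes
+ * @outp: [out] uC doorbell addresses returned to the client
+ *
+ * Returns: 0 on success, negative on failure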
+ */
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp)
+{
+	struct ipa3_ep_context *ep_ul;
+	struct ipa3_ep_context *ep_dl;
+	int ipa_ep_idx_ul;
+	int ipa_ep_idx_dl;
+	int result = 0;
+
+	if (in == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+	ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+	if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+		IPAERR("fail to alloc EP.\n");
+		return -EFAULT;
+	}
+
+	ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->valid || ep_dl->valid) {
+		IPAERR("EP already allocated.\n");
+		return -EFAULT;
+	}
+
+	memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys));
+	memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup ul ep cfg */
+	ep_ul->valid = 1;
+	ep_ul->client = in->ul.client;
+	result = ipa3_enable_data_path(ipa_ep_idx_ul);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_ul);
+		return -EFAULT;
+	}
+	ep_ul->client_notify = notify;
+	ep_ul->priv = priv;
+
+	memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+	ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+	ep_ul->cfg.hdr.hdr_len = hdr_len;
+	ep_ul->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+		IPAERR("fail to setup ul pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+		IPAERR("fail to send cmd to uc for ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
+	outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+	ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->ul.client,
+		ipa_ep_idx_ul);
+
+	/* setup dl ep cfg */
+	ep_dl->valid = 1;
+	ep_dl->client = in->dl.client;
+	result = ipa3_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	memset(&ep_dl->cfg, 0, sizeof(ep_dl->cfg));
+	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+	ep_dl->cfg.hdr.hdr_len = hdr_len;
+	ep_dl->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+		IPAERR("fail to setup dl pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+		IPAERR("fail to send cmd to uc for dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->dl.client,
+		ipa_ep_idx_dl);
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+/**
+ * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes
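+ * @ipa_ep_idx_ul: IPA endpoint index of the UL pipe
+ * @ipa_ep_idx_dl: IPA endpoint index of the DL pipe
+ *
+ * Returns: 0 on success, negative on failure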
+ */
+
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl)
+{
+	struct ipa_mem_buffer cmd;
+	struct ipa3_ep_context *ep_ul, *ep_dl;
+	struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+	union Ipa3HwNtnCommonChCmdData_t *tear;
+	int result = 0;
+
+	IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+	IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+	ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+		ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+		IPAERR("channel bad state: ul %d dl %d\n",
+			ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+		return -EFAULT;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	/* teardown the UL pipe */
+	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa3_disable_data_path(ipa_ep_idx_ul);
+	ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
+	memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+	/* teardown the DL pipe */
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa3_disable_data_path(ipa_ep_idx_dl);
+	memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
new file mode 100644
index 0000000..946fc7e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -0,0 +1,580 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ *  @brief   Enum value determined based on the feature it
+ *           corresponds to
+ *  +----------------+----------------+
+ *  |    3 bits      |     5 bits     |
+ *  +----------------+----------------+
+ *  |   HW_FEATURE   |     OPCODE     |
+ *  +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) (((feature) << 5) | (opcode))
+#define EXTRACT_UC_FEATURE(value) ((value) >> 5)
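+
+/*
+ * Example: FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2) places feature 0x3 in the
+ * upper 3 bits and opcode 2 in the lower 5 bits, i.e. (0x3 << 5) | 2 = 0x62,
+ * and EXTRACT_UC_FEATURE(0x62) recovers IPA_HW_FEATURE_WDI.
+ */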
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa3_hw_features - Values that represent the features supported
+ * in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to offload operation in IPA HW
+*/
+enum ipa3_hw_features {
+	IPA_HW_FEATURE_COMMON		=	0x0,
+	IPA_HW_FEATURE_MHI		=	0x1,
+	IPA_HW_FEATURE_POWER_COLLAPSE	=	0x2,
+	IPA_HW_FEATURE_WDI		=	0x3,
+	IPA_HW_FEATURE_ZIP		=	0x4,
+	IPA_HW_FEATURE_NTN		=	0x5,
+	IPA_HW_FEATURE_OFFLOAD	=	0x6,
+	IPA_HW_FEATURE_MAX		=	IPA_HW_NUM_FEATURES
+};
+
+/**
+ * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
+ *  device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ */
+enum ipa3_hw_2_cpu_events {
+	IPA_HW_2_CPU_EVENT_NO_OP     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_2_CPU_EVENT_ERROR     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_2_CPU_EVENT_LOG_INFO  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa3_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error persists
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
+ * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
+ */
+enum ipa3_hw_errors {
+	IPA_HW_ERROR_NONE              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_INVALID_DOORBELL_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_DMA_ERROR               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_FATAL_SYSTEM_ERROR      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+	IPA_HW_INVALID_OPCODE          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+	IPA_HW_INVALID_PARAMS        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+	IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+	IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+	IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter lower 32bit. The parameter field can
+ * hold immediate parameters or point to a structure in system memory (in such
+ * a case the address must be accessible for HW)
+ * @cmdParams_hi : CPU->HW command parameter higher 32bit.
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter field can hold
+ * 32 bits of immediate parameters or point to a structure in system memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter field can hold 32
+ *		bits of immediate parameters or point to a structure in
+ *		system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the
+ *				error type.
+ * @warningCounter : The warnings counter. The counter carries information
+ *						regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+	u8  cmdOp;
+	u8  reserved_01;
+	u16 reserved_03_02;
+	u32 cmdParams;
+	u32 cmdParams_hi;
+	u8  responseOp;
+	u8  reserved_0D;
+	u16 reserved_0F_0E;
+	u32 responseParams;
+	u8  eventOp;
+	u8  reserved_15;
+	u16 reserved_17_16;
+	u32 eventParams;
+	u32 firstErrorAddress;
+	u8  hwState;
+	u8  warningCounter;
+	u16 reserved_23_22;
+	u16 interfaceVersionCommon;
+	u16 reserved_27_26;
+} __packed;
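+
+/*
+ * Note: the reserved_* member names above encode their byte offsets within
+ * this mapping, e.g. reserved_0D is the byte at offset 0x0D and
+ * reserved_23_22 spans offsets 0x22-0x23; cmdOp therefore sits at offset
+ * 0x00, responseOp at 0x0C and eventOp at 0x14.
+ */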
+
+/**
+ * union Ipa3HwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union Ipa3HwFeatureInfoData_t {
+	struct IpaHwFeatureInfoParams_t {
+		u32 offset:16;
+		u32 size:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+	struct IpaHwErrorEventParams_t {
+		u32 errorType:8;
+		u32 reserved:24;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * struct Ipa3HwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @featureInfo : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note    Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct Ipa3HwEventInfoData_t {
+	u32 baseAddrOffset;
+	union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note    The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+	u32 featureMask;
+	u32 circBuffBaseAddrOffset;
+	struct Ipa3HwEventInfoData_t statsInfo;
+	struct Ipa3HwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa3_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa3_uc_ntn_ctx {
+	u32 ntn_uc_stats_ofst;
+	struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa3_hw_2_cpu_ntn_events - Values that represent HW event
+ *			to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ *			detected an error in NTN
+ *
+ */
+enum ipa3_hw_2_cpu_ntn_events {
+	IPA_HW_2_CPU_EVENT_NTN_ERROR =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa3_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error persists
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa3_hw_ntn_errors {
+	IPA_HW_NTN_ERROR_NONE    = 0,
+	IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ *			initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ *     Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_states {
+	IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_NTN_CHANNEL_STATE_RUNNING  = 2,
+	IPA_HW_NTN_CHANNEL_STATE_ERROR    = 3,
+	IPA_HW_NTN_CHANNEL_STATE_INVALID  = 0xFF
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ *		transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ *		num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ *		failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ *		transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_errors {
+	IPA_HW_NTN_CH_ERR_NONE            = 0,
+	IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+	IPA_HW_NTN_TX_FSM_ERROR           = 2,
+	IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL  = 3,
+	IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+	IPA_HW_NTN_RX_FSM_ERROR           = 5,
+	IPA_HW_NTN_RX_CACHE_NON_EMPTY     = 6,
+	IPA_HW_NTN_CH_ERR_RESERVED        = 0xFF
+};
+
+
+/**
+ * struct Ipa3HwNtnSetUpCmdData_t  - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ *  ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ *  buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ *  Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ *  Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of the each data buffer allocated in
+ *  DDR
+ */
+struct Ipa3HwNtnSetUpCmdData_t {
+	u32 ring_base_pa;
+	u32 buff_pool_base_pa;
+	u16 ntn_ring_size;
+	u16 num_buffers;
+	u32 ntn_reg_base_ptr_pa;
+	u8  ipa_pipe_number;
+	u8  dir;
+	u16 data_buff_size;
+
+} __packed;
+
+/**
+ * union Ipa3HwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear down command
+ *
+ *@ipa_pipe_number: IPA pipe number. This could be a Tx or an Rx pipe
+ */
+union Ipa3HwNtnCommonChCmdData_t {
+	struct IpaHwNtnCommonChCmdParams_t {
+		u32  ipa_pipe_number :8;
+		u32  reserved        :24;
+	} __packed params;
+	uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union Ipa3HwNTNErrorEventData_t - Structure holding the parameters for the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ *@ntn_error_type: type of NTN error (ipa3_hw_ntn_errors)
+ *@ipa_pipe_number: IPA pipe number on which error has happened
+ *   Applicable only if error type indicates channel error
+ *@ntn_ch_err_type: Information about the channel error (if
+ *		available)
+ */
+union Ipa3HwNTNErrorEventData_t {
+	struct IpaHwNTNErrorEventParams_t {
+		u32  ntn_error_type  :8;
+		u32  reserved        :8;
+		u32  ipa_pipe_number :8;
+		u32  ntn_ch_err_type :8;
+	} __packed params;
+	uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe
+ * information
+ *
+ *@max_outstanding_pkts: Number of outstanding packets in Rx
+ *		Ring
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@rx_ring_rp_value: Read pointer last advertised to the WLAN FW
+ *
+ *@ntn_ch_err_type: Information about the channel error (if
+ *		available)
+ *@rx_ind_ring_stats:
+ *@bam_stats:
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_pkts_in_dis_uninit_state:
+ *@num_bam_int_handled_while_not_in_bam: Number of Bam
+ *		Interrupts handled by FW
+ *@num_bam_int_handled_while_in_bam_state: Number of Bam
+ *   Interrupts handled by FW
+ */
+struct NTN3RxInfoData_t {
+	u32  max_outstanding_pkts;
+	u32  num_pkts_processed;
+	u32  rx_ring_rp_value;
+	struct IpaHwRingStats_t rx_ind_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32  num_bam_int_handled;
+	u32  num_db;
+	u32  num_unexpected_db;
+	u32  num_pkts_in_dis_uninit_state;
+	u32  num_bam_int_handled_while_not_in_bam;
+	u32  num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel statistics.
+ * Ensure that this is always word aligned
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@tail_ptr_val: Latest value of doorbell written to copy engine
+ *@num_db_fired: Number of DB from uC FW to Copy engine
+ *
+ *@tx_comp_ring_stats:
+ *@bam_stats:
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_bam_int_in_non_running_state: Number of Bam interrupts
+ *			while not in Running state
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@num_bam_int_handled_while_wait_for_bam: Number of times the
+ *		Imm Cmd is injected due to fw_desc change
+ */
+struct NTNTxInfoData_t {
+	u32  num_pkts_processed;
+	u32  tail_ptr_val;
+	u32  num_db_fired;
+	struct IpaHwRingStats_t tx_comp_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32  num_db;
+	u32  num_unexpected_db;
+	u32  num_bam_int_handled;
+	u32  num_bam_int_in_non_running_state;
+	u32  num_qmb_int_handled;
+	u32  num_bam_int_handled_while_wait_for_bam;
+	u32  num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Rx/Tx channel
+ * statistics. Ensure that this is always word aligned
+ *
+ */
+struct Ipa3HwStatsNTNInfoData_t {
+	struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+	struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands -  Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ *				Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ *				Offload protocol's Tx/Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+	IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+	IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
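+
+/*
+ * IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN has no explicit initializer, so it
+ * follows the previous enumerator and equals
+ * FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2).
+ */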
+
+
+/**
+ * enum ipa3_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is
+ *			initialized but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running.
+ *			Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ *				be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa3_hw_offload_channel_states {
+	IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING  = 2,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR    = 3,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID  = 0xFF
+};
+
+
+/**
+ * enum ipa3_hw_2_cpu_cmd_resp_status -  Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa3_hw_2_cpu_offload_cmd_resp_status {
+	IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+	IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+	IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+	IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+	IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+	IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+	IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+	IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+	IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - Union of the per-protocol channel setup command
+ * parameters
+ */
+union IpaHwSetUpCmd {
+	struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - Parameters for the
+ * IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP command
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+	u8 protocol;
+	union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Union of the per-protocol parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ */
+union IpaHwCommonChCmd {
+	union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+	u8 protocol;
+	union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
new file mode 100644
index 0000000..e1deb58
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -0,0 +1,1815 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+
+#define IPA_HOLB_TMR_DIS 0x0
+
+#define IPA_HW_INTERFACE_WDI_VERSION 0x0001
+#define IPA_HW_WDI_RX_MBOX_START_INDEX 48
+#define IPA_HW_WDI_TX_MBOX_START_INDEX 50
+#define IPA_WDI_RING_ALIGNMENT 8
+
+#define IPA_WDI_CONNECTED BIT(0)
+#define IPA_WDI_ENABLED BIT(1)
+#define IPA_WDI_RESUMED BIT(2)
+#define IPA_UC_POLL_SLEEP_USEC 100
+
+#define IPA_WDI_RX_RING_RES			0
+#define IPA_WDI_RX_RING_RP_RES		1
+#define IPA_WDI_RX_COMP_RING_RES	2
+#define IPA_WDI_RX_COMP_RING_WP_RES	3
+#define IPA_WDI_TX_RING_RES			4
+#define IPA_WDI_CE_RING_RES			5
+#define IPA_WDI_CE_DB_RES			6
+#define IPA_WDI_MAX_RES				7
+
+struct ipa_wdi_res {
+	struct ipa_wdi_buffer_info *res;
+	unsigned int nents;
+	bool valid;
+};
+
+static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES];
+
+static void ipa3_uc_wdi_loaded_handler(void);
+
+/**
+ * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to
+ * CPU.
+ * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error
+ * in WDI
+ */
+enum ipa_hw_2_cpu_wdi_events {
+	IPA_HW_2_CPU_EVENT_WDI_ERROR =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+};
+
+/**
+ * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state
+ * machine.
+ * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but
+ * disabled
+ * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in
+ * suspended state
+ * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in
+ * operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_wdi_channel_states {
+	IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2,
+	IPA_HW_WDI_CHANNEL_STATE_RUNNING         = 3,
+	IPA_HW_WDI_CHANNEL_STATE_ERROR           = 4,
+	IPA_HW_WDI_CHANNEL_STATE_INVALID         = 0xFF
+};
+
+/**
+ * enum ipa3_cpu_2_hw_commands -  Values that represent the WDI commands from
+ * CPU
+ * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path
+ * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel
+ * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/ Rx Path
+ */
+enum ipa_cpu_2_hw_wdi_commands {
+	IPA_CPU_2_HW_CMD_WDI_TX_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_CPU_2_HW_CMD_WDI_RX_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_CPU_2_HW_CMD_WDI_CH_ENABLE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_CPU_2_HW_CMD_WDI_CH_DISABLE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_CPU_2_HW_CMD_WDI_CH_RESUME  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+};
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
+ * enum ipa_hw_wdi_errors - WDI specific error types.
+ * @IPA_HW_WDI_ERROR_NONE : No error persists
+ * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_wdi_errors {
+	IPA_HW_WDI_ERROR_NONE    = 0,
+	IPA_HW_WDI_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_wdi_ch_errors - List of WDI Channel error types. This is present
+ * in the event param.
+ * @IPA_HW_WDI_CH_ERR_NONE : No error persists
+ * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx
+ * Completion ring
+ * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition
+ * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating num RE to bring
+ * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use
+*/
+enum ipa_hw_wdi_ch_errors {
+	IPA_HW_WDI_CH_ERR_NONE                 = 0,
+	IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1,
+	IPA_HW_WDI_TX_FSM_ERROR                = 2,
+	IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL       = 3,
+	IPA_HW_WDI_CH_ERR_RESERVED             = 0xFF
+};
+
+/**
+ * struct IpaHwSharedMemWdiMapping_t  - Structure referring to the common and
+ * WDI section of 128B shared memory located in offset zero of SW Partition in
+ * IPA SRAM.
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemWdiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u32 reserved_2B_28;
+	u32 reserved_2F_2C;
+	u32 reserved_33_30;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+	u16 interfaceVersionWdi;
+	u16 reserved_43_42;
+	u8  wdi_tx_ch_0_state;
+	u8  wdi_rx_ch_0_state;
+	u16 reserved_47_46;
+} __packed;
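+
+/*
+ * Note: the reserved_YY_XX field names above follow a convention where the
+ * suffix appears to encode the byte range (offset 0xXX through 0xYY) that the
+ * field occupies within the shared memory window, e.g. reserved_2B_28 covers
+ * bytes 0x28-0x2B. This reading is inferred from the naming convention only.
+ */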
+
+/**
+ * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command.
+ * @comp_ring_base_pa : This is the physical address of the base of the Tx
+ * completion ring
+ * @comp_ring_size : This is the size of the Tx completion ring
+ * @reserved_comp_ring : Reserved field for expansion of Completion ring params
+ * @ce_ring_base_pa : This is the physical address of the base of the Copy
+ * Engine Source Ring
+ * @ce_ring_size : Copy Engine Ring size
+ * @reserved_ce_ring : Reserved field for expansion of CE ring params
+ * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the
+ * IPA uC has to write into to trigger the copy engine
+ * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring
+ * and the Tx completion ring has to be at least (num_tx_buffers + 1)
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Tx path
+ * @reserved : Reserved field
+ *
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to HW
+ */
+struct IpaHwWdiTxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u16 num_tx_buffers;
+	u8  ipa_pipe_number;
+	u8  reserved;
+} __packed;
+
+struct IpaHwWdi2TxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u32 comp_ring_base_pa_hi;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u32 ce_ring_base_pa_hi;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u32 ce_ring_doorbell_pa_hi;
+	u16 num_tx_buffers;
+	u8  ipa_pipe_number;
+	u8  reserved;
+} __packed;
+
+/**
+ * struct IpaHwWdiRxSetUpCmdData_t -  Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring
+ * (containing Rx buffers)
+ * @rx_ring_size : This is the size of the Rx ring
+ * @rx_ring_rp_pa : This is the physical address of the location through which
+ * IPA uc is expected to communicate about the Read pointer into the Rx Ring
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Rx path
+ *
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to HW
+ */
+struct IpaHwWdiRxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u8  ipa_pipe_number;
+} __packed;
+
+struct IpaHwWdi2RxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_base_pa_hi;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u32 rx_ring_rp_pa_hi;
+	u32 rx_comp_ring_base_pa;
+	u32 rx_comp_ring_base_pa_hi;
+	u32 rx_comp_ring_size;
+	u32 rx_comp_ring_wp_pa;
+	u32 rx_comp_ring_wp_pa_hi;
+	u8  ipa_pipe_number;
+} __packed;
+
+/**
+ * union IpaHwWdiRxExtCfgCmdData_t - Union holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
+ * @ipa_pipe_number : The IPA pipe number for which this config is passed
+ * @qmap_id : QMAP ID to be set in the metadata register
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiRxExtCfgCmdData_t {
+	struct IpaHwWdiRxExtCfgCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 qmap_id:8;
+		u32 reserved:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiCommonChCmdData_t - Union holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ * IPA_CPU_2_HW_CMD_WDI_CH_RESUME commands.
+ * @ipa_pipe_number :  The IPA pipe number. This could be Tx or an Rx pipe
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiCommonChCmdData_t {
+	struct IpaHwWdiCommonChCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 reserved:24;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR
+ * event.
+ * @wdi_error_type : The type of WDI error that occurred (see enum
+ * ipa_hw_wdi_errors)
+ * @reserved : Reserved
+ * @ipa_pipe_number : IPA pipe number on which error has happened. Applicable
+ * only if error type indicates channel error
+ * @wdi_ch_err_type : Information about the channel error (if available)
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiErrorEventData_t {
+	struct IpaHwWdiErrorEventParams_t {
+		u32 wdi_error_type:8;
+		u32 reserved:8;
+		u32 ipa_pipe_number:8;
+		u32 wdi_ch_err_type:8;
+	} __packed params;
+	u32 raw32b;
+} __packed;
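+
+/*
+ * For the "immediate param" unions above, callers fill in the bit-field view
+ * (params) and pass the overlaid raw32b value as the command/event parameter;
+ * see for example ipa3_write_qmapid_wdi_pipe() further below, which sends
+ * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG with qmap.raw32b.
+ */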
+
+static void ipa3_uc_wdi_event_log_info_handler(
+		struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_WDI)) == 0) {
+		IPAERR("WDI feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].
+		params.size != sizeof(struct IpaHwStatsWDIInfoData_t)) {
+		IPAERR("wdi stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsWDIInfoData_t),
+			uc_event_top_mmio->statsInfo.
+			featureInfo[IPA_HW_FEATURE_WDI].params.size);
+		return;
+	}
+
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_WDI].params.offset;
+	IPAERR("WDI stats ofst=0x%x\n", ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+	if (ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
+		sizeof(struct IpaHwStatsWDIInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_wdi_stats 0x%x outside SRAM\n",
+			ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst,
+		sizeof(struct IpaHwStatsWDIInfoData_t));
+	if (!ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc wdi stats\n");
+		return;
+	}
+}
+
+static void ipa3_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t
+				     *uc_sram_mmio)
+{
+	union IpaHwWdiErrorEventData_t wdi_evt;
+	struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext;
+
+	if (uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_WDI_ERROR) {
+		wdi_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n",
+			wdi_evt.params.wdi_error_type,
+			wdi_evt.params.ipa_pipe_number,
+			wdi_evt.params.wdi_ch_err_type);
+		wdi_sram_mmio_ext =
+			(struct IpaHwSharedMemWdiMapping_t *)
+			uc_sram_mmio;
+		IPADBG("tx_ch_state=%u rx_ch_state=%u\n",
+			wdi_sram_mmio_ext->wdi_tx_ch_0_state,
+			wdi_sram_mmio_ext->wdi_rx_ch_0_state);
+	}
+}
+
+/**
+ * ipa3_get_wdi_stats() - Query WDI statistics from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats.y = \
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y
+#define RX_STATS(y) stats->rx_ch_stats.y = \
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y
+
+	if (!stats || !ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+		IPAERR("bad parms stats=%p wdi_stats=%p\n",
+			stats,
+			ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio);
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(copy_engine_doorbell_value);
+	TX_STATS(num_db_fired);
+	TX_STATS(tx_comp_ring_stats.ringFull);
+	TX_STATS(tx_comp_ring_stats.ringEmpty);
+	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+	TX_STATS(tx_comp_ring_stats.ringUsageLow);
+	TX_STATS(tx_comp_ring_stats.RingUtilCount);
+	TX_STATS(bam_stats.bamFifoFull);
+	TX_STATS(bam_stats.bamFifoEmpty);
+	TX_STATS(bam_stats.bamFifoUsageHigh);
+	TX_STATS(bam_stats.bamFifoUsageLow);
+	TX_STATS(bam_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_unexpected_db);
+	TX_STATS(num_bam_int_handled);
+	TX_STATS(num_bam_int_in_non_running_state);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(num_bam_int_handled_while_wait_for_bam);
+
+	RX_STATS(max_outstanding_pkts);
+	RX_STATS(num_pkts_processed);
+	RX_STATS(rx_ring_rp_value);
+	RX_STATS(rx_ind_ring_stats.ringFull);
+	RX_STATS(rx_ind_ring_stats.ringEmpty);
+	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+	RX_STATS(rx_ind_ring_stats.ringUsageLow);
+	RX_STATS(rx_ind_ring_stats.RingUtilCount);
+	RX_STATS(bam_stats.bamFifoFull);
+	RX_STATS(bam_stats.bamFifoEmpty);
+	RX_STATS(bam_stats.bamFifoUsageHigh);
+	RX_STATS(bam_stats.bamFifoUsageLow);
+	RX_STATS(bam_stats.bamUtilCount);
+	RX_STATS(num_bam_int_handled);
+	RX_STATS(num_db);
+	RX_STATS(num_unexpected_db);
+	RX_STATS(num_pkts_in_dis_uninit_state);
+	RX_STATS(reserved1);
+	RX_STATS(reserved2);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
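+
+/*
+ * Hedged usage note (illustration only, not part of this interface): a client
+ * declares a local struct IpaHwStatsWDIInfoData_t and passes it to
+ * ipa3_get_wdi_stats(); on a zero return, the tx_ch_stats and rx_ch_stats
+ * members hold a snapshot of the uC counters copied from the SRAM-mapped
+ * stats window set up above.
+ */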
+
+int ipa3_wdi_init(void)
+{
+	struct ipa3_uc_hdlrs uc_wdi_cbs = { 0 };
+
+	uc_wdi_cbs.ipa_uc_event_hdlr = ipa3_uc_wdi_event_handler;
+	uc_wdi_cbs.ipa_uc_event_log_info_hdlr =
+		ipa3_uc_wdi_event_log_info_handler;
+	uc_wdi_cbs.ipa_uc_loaded_hdlr =
+		ipa3_uc_wdi_loaded_handler;
+
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs);
+
+	return 0;
+}
+
+static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
+		bool device, unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+			PAGE_SIZE);
+	int ret;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+			true_len,
+			device ? (prot | IOMMU_DEVICE) : prot);
+	if (ret) {
+		IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+		return -EINVAL;
+	}
+
+	ipa3_ctx->wdi_map_cnt++;
+	cb->next_addr = va + true_len;
+	*iova = va + pa - rounddown(pa, PAGE_SIZE);
+	return 0;
+}
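+
+/*
+ * Worked example of the rounding math above (illustrative values): with a 4KB
+ * PAGE_SIZE, pa = 0x10001678 and len = 0x100 give rounddown(pa, PAGE_SIZE) =
+ * 0x10001000 and true_len = roundup(0x100 + 0x678, PAGE_SIZE) = 0x1000, and
+ * the returned *iova points 0x678 bytes into the page-aligned VA, so the
+ * original offset of pa within its page is preserved.
+ */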
+
+static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
+		unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	int ret;
+	int i;
+	struct scatterlist *sg;
+	unsigned long start_iova = va;
+	phys_addr_t phys;
+	size_t len;
+	int count = 0;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return -EINVAL;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan-driver */
+		phys = sg->dma_address;
+		len = PAGE_ALIGN(sg->offset + sg->length);
+
+		ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
+		if (ret) {
+			IPAERR("iommu map failed for pa=%pa len=%zu\n",
+					&phys, len);
+			goto bad_mapping;
+		}
+		va += len;
+		ipa3_ctx->wdi_map_cnt++;
+		count++;
+	}
+	cb->next_addr = va;
+	*iova = start_iova;
+
+	return 0;
+
+bad_mapping:
+	for_each_sg(sgt->sgl, sg, count, i)
+		iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
+				sg_dma_len(sg));
+	return -EINVAL;
+}
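+
+/*
+ * Unlike the single-PA variant above, this maps each sg segment of the
+ * WLAN-provided table back to back in IOVA space, so the resource appears
+ * contiguous to the uC starting at the returned *iova; on a mapping failure
+ * the segments mapped so far are unwound.
+ */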
+
+static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+	int i;
+	int j;
+	int start;
+	int end;
+
+	if (IPA_CLIENT_IS_CONS(client)) {
+		start = IPA_WDI_TX_RING_RES;
+		end = IPA_WDI_CE_DB_RES;
+	} else {
+		start = IPA_WDI_RX_RING_RES;
+		if (ipa3_ctx->ipa_wdi2)
+			end = IPA_WDI_RX_COMP_RING_WP_RES;
+		else
+			end = IPA_WDI_RX_RING_RP_RES;
+	}
+
+	for (i = start; i <= end; i++) {
+		if (wdi_res[i].valid) {
+			for (j = 0; j < wdi_res[i].nents; j++) {
+				iommu_unmap(cb->mapping->domain,
+					wdi_res[i].res[j].iova,
+					wdi_res[i].res[j].size);
+				ipa3_ctx->wdi_map_cnt--;
+			}
+			kfree(wdi_res[i].res);
+			wdi_res[i].valid = false;
+		}
+	}
+
+	if (ipa3_ctx->wdi_map_cnt == 0)
+		cb->next_addr = cb->va_end;
+
+}
+
+static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa,
+		unsigned long iova, size_t len)
+{
+	IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&pa, iova, len);
+	wdi_res[res_idx].res = kzalloc(sizeof(struct ipa_wdi_res), GFP_KERNEL);
+	if (!wdi_res[res_idx].res)
+		BUG();
+	wdi_res[res_idx].nents = 1;
+	wdi_res[res_idx].valid = true;
+	wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE);
+	wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE);
+	wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa,
+				PAGE_SIZE), PAGE_SIZE);
+	IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova,
+			wdi_res[res_idx].res->size);
+}
+
+static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt,
+		unsigned long iova)
+{
+	int i;
+	struct scatterlist *sg;
+	unsigned long curr_iova = iova;
+
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return;
+	}
+
+	wdi_res[res_idx].res = kcalloc(sgt->nents, sizeof(struct ipa_wdi_res),
+			GFP_KERNEL);
+	if (!wdi_res[res_idx].res)
+		BUG();
+	wdi_res[res_idx].nents = sgt->nents;
+	wdi_res[res_idx].valid = true;
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan */
+		wdi_res[res_idx].res[i].pa = sg->dma_address;
+		wdi_res[res_idx].res[i].iova = curr_iova;
+		wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset +
+				sg->length);
+		IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&wdi_res[res_idx].res[i].pa,
+			wdi_res[res_idx].res[i].iova,
+			wdi_res[res_idx].res[i].size);
+		curr_iova += wdi_res[res_idx].res[i].size;
+	}
+}
+
+static int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en,
+		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+		unsigned long *iova)
+{
+	/* SMMU on WLAN but no SMMU on IPA - unsupported pairing */
+	if (wlan_smmu_en && ipa3_ctx->smmu_s1_bypass) {
+		IPAERR("Unsupported SMMU pairing\n");
+		return -EINVAL;
+	}
+
+	/* legacy: no SMMUs on either end */
+	if (!wlan_smmu_en && ipa3_ctx->smmu_s1_bypass) {
+		*iova = pa;
+		return 0;
+	}
+
+	/* no SMMU on WLAN but SMMU on IPA */
+	if (!wlan_smmu_en && !ipa3_ctx->smmu_s1_bypass) {
+		if (ipa_create_uc_smmu_mapping_pa(pa, len,
+			(res_idx == IPA_WDI_CE_DB_RES) ? true : false, iova)) {
+			IPAERR("Fail to create mapping res %d\n", res_idx);
+			return -EFAULT;
+		}
+		ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+		return 0;
+	}
+
+	/* SMMU on WLAN and SMMU on IPA */
+	if (wlan_smmu_en && !ipa3_ctx->smmu_s1_bypass) {
+		switch (res_idx) {
+		case IPA_WDI_RX_RING_RP_RES:
+		case IPA_WDI_RX_COMP_RING_WP_RES:
+		case IPA_WDI_CE_DB_RES:
+			if (ipa_create_uc_smmu_mapping_pa(pa, len,
+				(res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+				iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+			break;
+		case IPA_WDI_RX_RING_RES:
+		case IPA_WDI_RX_COMP_RING_RES:
+		case IPA_WDI_TX_RING_RES:
+		case IPA_WDI_CE_RING_RES:
+			if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	return 0;
+}
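+
+/*
+ * Summary of the pairings handled above: WLAN SMMU enabled while IPA S1 is
+ * bypassed is rejected; both disabled passes the PA straight through as the
+ * IOVA; IPA-only SMMU maps the caller-supplied PA; both enabled maps
+ * doorbell/pointer addresses by PA and the rings through the WLAN-provided
+ * sg_table. The CE doorbell is mapped with IOMMU_DEVICE (device = true),
+ * presumably because it targets a doorbell register rather than DRAM.
+ */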
+
+/**
+ * ipa3_connect_wdi_pipe() - WDI client connect
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa3_ep_context *ep;
+	struct ipa_mem_buffer cmd;
+	struct IpaHwWdiTxSetUpCmdData_t *tx;
+	struct IpaHwWdiRxSetUpCmdData_t *rx;
+	struct IpaHwWdi2TxSetUpCmdData_t *tx_2;
+	struct IpaHwWdi2RxSetUpCmdData_t *rx_2;
+
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	unsigned long va;
+	phys_addr_t pa;
+	u32 len;
+
+	if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. in=%p out=%p\n", in, out);
+		if (in)
+			IPAERR("client = %d\n", in->sys.client);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT ||
+			in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+			IPAERR("alignment failure on TX\n");
+			return -EINVAL;
+		}
+	} else {
+		if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+			IPAERR("alignment failure on RX\n");
+			return -EINVAL;
+		}
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+	IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (ipa3_ctx->ipa_wdi2)
+			cmd.size = sizeof(*tx_2);
+		else
+			cmd.size = sizeof(*tx);
+		IPADBG("comp_ring_base_pa=0x%pa\n",
+				&in->u.dl.comp_ring_base_pa);
+		IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
+		IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa);
+		IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
+		IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+				&in->u.dl.ce_door_bell_pa);
+		IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+	} else {
+		if (ipa3_ctx->ipa_wdi2)
+			cmd.size = sizeof(*rx_2);
+		else
+			cmd.size = sizeof(*rx);
+		IPADBG("rx_ring_base_pa=0x%pa\n",
+			&in->u.ul.rdy_ring_base_pa);
+		IPADBG("rx_ring_size=%d\n",
+			in->u.ul.rdy_ring_size);
+		IPADBG("rx_ring_rp_pa=0x%pa\n",
+			&in->u.ul.rdy_ring_rp_pa);
+		IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+			&in->u.ul.rdy_comp_ring_base_pa);
+		IPADBG("rx_comp_ring_size=%d\n",
+			in->u.ul.rdy_comp_ring_size);
+		IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+			&in->u.ul.rdy_comp_ring_wp_pa);
+		ipa3_ctx->uc_ctx.rdy_ring_base_pa =
+			in->u.ul.rdy_ring_base_pa;
+		ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
+			in->u.ul.rdy_ring_rp_pa;
+		ipa3_ctx->uc_ctx.rdy_ring_size =
+			in->u.ul.rdy_ring_size;
+		ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa =
+			in->u.ul.rdy_comp_ring_base_pa;
+		ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+			in->u.ul.rdy_comp_ring_wp_pa;
+		ipa3_ctx->uc_ctx.rdy_comp_ring_size =
+			in->u.ul.rdy_comp_ring_size;
+
+		/* check if the VA is empty */
+		if (ipa3_ctx->ipa_wdi2) {
+			if (in->smmu_enabled) {
+				if (!in->u.ul_smmu.rdy_ring_rp_va ||
+					!in->u.ul_smmu.rdy_comp_ring_wp_va)
+					goto dma_alloc_fail;
+			} else {
+				if (!in->u.ul.rdy_ring_rp_va ||
+					!in->u.ul.rdy_comp_ring_wp_va)
+					goto dma_alloc_fail;
+			}
+			IPADBG("rdy_ring_rp value =%d\n",
+				in->smmu_enabled ?
+				*in->u.ul_smmu.rdy_ring_rp_va :
+				*in->u.ul.rdy_ring_rp_va);
+			IPADBG("rx_comp_ring_wp value=%d\n",
+				in->smmu_enabled ?
+				*in->u.ul_smmu.rdy_comp_ring_wp_va :
+				*in->u.ul.rdy_comp_ring_wp_va);
+			ipa3_ctx->uc_ctx.rdy_ring_rp_va =
+				in->smmu_enabled ?
+				in->u.ul_smmu.rdy_ring_rp_va :
+				in->u.ul.rdy_ring_rp_va;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va =
+				in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_wp_va :
+				in->u.ul.rdy_comp_ring_wp_va;
+		}
+	}
+
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		result = -ENOMEM;
+		goto dma_alloc_fail;
+	}
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (ipa3_ctx->ipa_wdi2) {
+			tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+			IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.dl_smmu.comp_ring_size,
+				in->u.dl.comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+					in->smmu_enabled,
+					in->u.dl.comp_ring_base_pa,
+					&in->u.dl_smmu.comp_ring,
+					len,
+					false,
+					&va)) {
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->comp_ring_size = len;
+			IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->comp_ring_base_pa_hi,
+					tx_2->comp_ring_base_pa);
+
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size);
+			/* WA: wlan passed ce_ring sg_table PA directly */
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->ce_ring_size = len;
+			IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_base_pa_hi,
+					tx_2->ce_ring_base_pa);
+
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_doorbell_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_doorbell_pa_hi,
+					tx_2->ce_ring_doorbell_pa);
+
+			tx_2->num_tx_buffers = in->smmu_enabled ?
+				in->u.dl_smmu.num_tx_buffers :
+				in->u.dl.num_tx_buffers;
+			tx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+			IPADBG("TX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.comp_ring_size,
+					in->u.dl.comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.comp_ring_base_pa,
+						&in->u.dl_smmu.comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->comp_ring_base_pa = va;
+			tx->comp_ring_size = len;
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->ce_ring_base_pa = va;
+			tx->ce_ring_size = len;
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->ce_ring_doorbell_pa = va;
+			tx->num_tx_buffers = in->u.dl.num_tx_buffers;
+			tx->ipa_pipe_number = ipa_ep_idx;
+		}
+		out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+				IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+	} else {
+		if (ipa3_ctx->ipa_wdi2) {
+			rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.ul_smmu.rdy_ring_size,
+				in->u.ul.rdy_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_ring_size = len;
+			IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_ring_base_pa_hi,
+					rx_2->rx_ring_base_pa);
+
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_rp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n",
+					rx_2->rx_ring_rp_pa_hi,
+					rx_2->rx_ring_rp_pa);
+			len = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_size :
+				in->u.ul.rdy_comp_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.ul_smmu.rdy_comp_ring_size,
+					in->u.ul.rdy_comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_COMP_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_comp_ring_base_pa,
+						&in->u.ul_smmu.rdy_comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_comp_ring_size = len;
+			IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_base_pa_hi,
+					rx_2->rx_comp_ring_base_pa);
+
+			pa = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_wp_pa :
+				in->u.ul.rdy_comp_ring_wp_pa;
+			if (ipa_create_uc_smmu_mapping(
+						IPA_WDI_RX_COMP_RING_WP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_rng WP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_wp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_wp_pa_hi,
+					rx_2->rx_comp_ring_wp_pa);
+			rx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+			IPADBG("RX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.ul_smmu.rdy_ring_size,
+					in->u.ul.rdy_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_base_pa = va;
+			rx->rx_ring_size = len;
+
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_rp_pa = va;
+			rx->ipa_pipe_number = ipa_ep_idx;
+		}
+		out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+					IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+					IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+	}
+
+	ep->valid = 1;
+	ep->client = in->sys.client;
+	ep->keep_ipa_awake = in->sys.keep_ipa_awake;
+	result = ipa3_disable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx);
+		goto uc_timeout;
+	}
+	if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+	}
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CLIENT_IS_CONS(in->sys.client) ?
+				IPA_CPU_2_HW_CMD_WDI_TX_SET_UP :
+				IPA_CPU_2_HW_CMD_WDI_RX_SET_UP,
+				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	ep->skip_ep_cfg = in->sys.skip_ep_cfg;
+	ep->client_notify = in->sys.notify;
+	ep->priv = in->sys.priv;
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	out->clnt_hdl = ipa_ep_idx;
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	ep->uc_offload_state |= IPA_WDI_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
+
+	return 0;
+
+ipa_cfg_ep_fail:
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+uc_timeout:
+	ipa_release_uc_smmu_mappings(in->sys.client);
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+dma_alloc_fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+fail:
+	return result;
+}
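+
+/*
+ * WDI pipe lifecycle as implied by the uc_offload_state checks in this file
+ * (informational note, not a new interface):
+ *
+ *	ipa3_connect_wdi_pipe()    sets   IPA_WDI_CONNECTED
+ *	ipa3_enable_wdi_pipe()     adds   IPA_WDI_ENABLED
+ *	ipa3_resume_wdi_pipe()     adds   IPA_WDI_RESUMED
+ *	ipa3_suspend_wdi_pipe()    clears IPA_WDI_RESUMED
+ *	ipa3_disable_wdi_pipe()    clears IPA_WDI_ENABLED
+ *	ipa3_disconnect_wdi_pipe() tears the channel down and resets the EP
+ */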
+
+/**
+ * ipa3_disconnect_wdi_pipe() - WDI client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t tear;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	tear.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(tear.raw32b,
+				IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	ipa3_delete_dflt_flt_rules(clnt_hdl);
+	ipa_release_uc_smmu_mappings(ep->client);
+
+	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_enable_wdi_pipe() - WDI client enable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_enable_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t enable;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	enable.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(enable.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_DIS;
+		holb_cfg.tmr_val = 0;
+		result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state |= IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) enabled\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_disable_wdi_pipe() - WDI client disable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_disable_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t disable;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	u32 prod_hdl;
+	int i;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	/* checking rdy_ring_rp_pa matches the rdy_comp_ring_wp_pa on WDI2.0 */
+	if (ipa3_ctx->ipa_wdi2) {
+		for (i = 0; i < IPA_UC_FINISH_MAX; i++) {
+			IPADBG("(%d) rp_value(%u), comp_wp_value(%u)\n",
+					i,
+					*ipa3_ctx->uc_ctx.rdy_ring_rp_va,
+					*ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va);
+			if (*ipa3_ctx->uc_ctx.rdy_ring_rp_va !=
+				*ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va) {
+				usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+					IPA_UC_WAII_MAX_SLEEP);
+			} else {
+				break;
+			}
+		}
+		/* In case the ipa_uc still hasn't processed all
+		 * pending descriptors, warn about it
+		 */
+		if (i == IPA_UC_FINISH_MAX)
+			WARN_ON(1);
+	}
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	result = ipa3_disable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			clnt_hdl);
+		result = -EPERM;
+		goto uc_timeout;
+	}
+
+	/*
+	 * To avoid a data stall during continuous SAP on/off, before
+	 * setting delay on the IPA Consumer pipe, remove delay and enable
+	 * HOLB on the IPA Producer pipe
+	 */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+
+		prod_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+		if (ipa3_ctx->ep[prod_hdl].valid == 1) {
+			result = ipa3_disable_data_path(prod_hdl);
+			if (result) {
+				IPAERR("disable data path failed\n");
+				IPAERR("res=%d clnt=%d\n",
+					result, prod_hdl);
+				result = -EPERM;
+				goto uc_timeout;
+			}
+		}
+		usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC,
+			IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC);
+	}
+
+	disable.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(disable.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	/* Set the delay after disabling IPA Producer pipe */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state &= ~IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_resume_wdi_pipe() - WDI client resume
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_resume_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t resume;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	resume.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(resume.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_RESUME,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	if (result)
+		IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
+				clnt_hdl, result);
+	else
+		IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
+
+	ep->uc_offload_state |= IPA_WDI_RESUMED;
+	IPADBG("client (ep: %d) resumed\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_suspend_wdi_pipe() - WDI client suspend
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t suspend;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+				IPA_WDI_RESUMED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	suspend.params.ipa_pipe_number = clnt_hdl;
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Post suspend event first for IPA Producer\n");
+		IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
+		result = ipa3_uc_send_cmd(suspend.raw32b,
+			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+			false, 10*HZ);
+
+		if (result) {
+			result = -EFAULT;
+			goto uc_timeout;
+		}
+	}
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		ep_cfg_ctrl.ipa_ep_suspend = true;
+		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed to suspend result=%d\n",
+					clnt_hdl, result);
+		else
+			IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+	} else {
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed to delay result=%d\n",
+					clnt_hdl, result);
+		else
+			IPADBG("client (ep: %d) delayed\n", clnt_hdl);
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		result = ipa3_uc_send_cmd(suspend.raw32b,
+			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+			false, 10*HZ);
+
+		if (result) {
+			result = -EFAULT;
+			goto uc_timeout;
+		}
+	}
+
+	ipa3_ctx->tag_process_before_gating = true;
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state &= ~IPA_WDI_RESUMED;
+	IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiRxExtCfgCmdData_t qmap;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	qmap.params.ipa_pipe_number = clnt_hdl;
+	qmap.params.qmap_id = qmap_id;
+
+	result = ipa3_uc_send_cmd(qmap.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_uc_reg_rdyCB() - Register a uC ready callback if the uC is not ready
+ * @inout:	[in/out] input/output parameters from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_uc_reg_rdyCB(
+	struct ipa_wdi_uc_ready_params *inout)
+{
+	int result = 0;
+
+	if (inout == NULL) {
+		IPAERR("bad parm. inout=%p ", inout);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result) {
+		inout->is_uC_ready = false;
+		ipa3_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify;
+		ipa3_ctx->uc_wdi_ctx.priv = inout->priv;
+	} else {
+		inout->is_uC_ready = true;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_uc_dereg_rdyCB() - To de-register uC ready CB
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_uc_dereg_rdyCB(void)
+{
+	ipa3_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
+	ipa3_ctx->uc_wdi_ctx.priv = NULL;
+
+	return 0;
+}
+
+
+/**
+ * ipa3_uc_wdi_get_dbpa() - Retrieve the doorbell physical address of wlan
+ * pipes
+ * @param:  [in/out] input/output parameters from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_uc_wdi_get_dbpa(
+	struct ipa_wdi_db_params *param)
+{
+	if (param == NULL || param->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. param=%p ", param);
+		if (param)
+			IPAERR("client = %d\n", param->client);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(param->client)) {
+		param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+					IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+					IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+	} else {
+		param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+					IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+					IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+	}
+
+	return 0;
+}
+
+static void ipa3_uc_wdi_loaded_handler(void)
+{
+	if (!ipa3_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return;
+	}
+
+	if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb) {
+		ipa3_ctx->uc_wdi_ctx.uc_ready_cb(
+			ipa3_ctx->uc_wdi_ctx.priv);
+
+		ipa3_ctx->uc_wdi_ctx.uc_ready_cb =
+			NULL;
+		ipa3_ctx->uc_wdi_ctx.priv = NULL;
+	}
+}
+
+int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
+	int i;
+	int ret = 0;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+
+	if (!info) {
+		IPAERR("info = %p\n", info);
+		return -EINVAL;
+	}
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_buffers; i++) {
+		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+			&info[i].pa, info[i].iova, info[i].size);
+		info[i].result = ipa3_iommu_map(cb->iommu,
+			rounddown(info[i].iova, PAGE_SIZE),
+			rounddown(info[i].pa, PAGE_SIZE),
+			roundup(info[i].size + info[i].pa -
+				rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE),
+			prot);
+	}
+
+	return ret;
+}
+
+int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
+	int i;
+	int ret = 0;
+
+	if (!info) {
+		IPAERR("info = %p\n", info);
+		return -EINVAL;
+	}
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_buffers; i++) {
+		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+			&info[i].pa, info[i].iova, info[i].size);
+		info[i].result = iommu_unmap(cb->iommu,
+			rounddown(info[i].iova, PAGE_SIZE),
+			roundup(info[i].size + info[i].pa -
+				rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE));
+	}
+
+	return ret;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
new file mode 100644
index 0000000..a6e462f6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -0,0 +1,3639 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h>	/* gen_pool_alloc() */
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_gsi.h>
+#include <linux/elf.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
+#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
+#define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
+#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1)
+
+#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
+#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)
+
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
+
+/* Max pipes + ICs for TAG process */
+#define IPA_TAG_MAX_DESC (IPA3_MAX_NUM_PIPES + 6)
+
+#define IPA_TAG_SLEEP_MIN_USEC (1000)
+#define IPA_TAG_SLEEP_MAX_USEC (2000)
+#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
+#define IPA_BCR_REG_VAL_v3_0 (0x00000001)
+#define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
+#define IPA_AGGR_GRAN_MIN (1)
+#define IPA_AGGR_GRAN_MAX (32)
+#define IPA_EOT_COAL_GRAN_MIN (1)
+#define IPA_EOT_COAL_GRAN_MAX (16)
+
+#define IPA_AGGR_BYTE_LIMIT (\
+		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
+		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
+#define IPA_AGGR_PKT_LIMIT (\
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)
+
+/* In IPAv3, only endpoints 0-3 can be configured for deaggregation */
+#define IPA_EP_SUPPORTS_DEAGGR(idx) ((idx) >= 0 && (idx) <= 3)
+
+/* Configure IPA spare register 1 in order to have the correct IPA version:
+ * set bits 0, 2, 3 and 4. See SpareBits documentation.xlsx
+ */
+#define IPA_SPARE_REG_1_VAL (0x0000081D)
+
+
+/* HPS, DPS sequencer types */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY  0x00000000
+/* DMA + DECIPHER/CIPHER */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_DEC 0x00000011
+/* Packet Processing + no decipher + uCP (for Ethernet Bridging) */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP 0x00000002
+/* Packet Processing + decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_UCP 0x00000013
+/* 2 Packet Processing pass + no decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP 0x00000004
+/* 2 Packet Processing pass + decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP 0x00000015
+/* Packet Processing + no decipher + no uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP 0x00000006
+/* Packet Processing + no decipher + no uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_NO_UCP 0x00000017
+/* COMP/DECOMP */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP 0x00000020
+/* Invalid sequencer type */
+#define IPA_DPS_HPS_SEQ_TYPE_INVALID 0xFFFFFFFF
+
+#define IPA_DPS_HPS_SEQ_TYPE_IS_DMA(seq_type) \
+	(seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY || \
+	seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_DEC || \
+	seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP)
+
+#define QMB_MASTER_SELECT_DDR  (0)
+#define QMB_MASTER_SELECT_PCIE (1)
+
+#define IPA_CLIENT_NOT_USED \
+	{-1, -1, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR}
+
+/* Resource Group index */
+#define IPA_GROUP_UL		(0)
+#define IPA_GROUP_DL		(1)
+#define IPA_GROUP_DPL		IPA_GROUP_DL
+#define IPA_GROUP_DIAG		(2)
+#define IPA_GROUP_DMA		(3)
+#define IPA_GROUP_IMM_CMD	IPA_GROUP_DMA
+#define IPA_GROUP_Q6ZIP		(4)
+#define IPA_GROUP_Q6ZIP_GENERAL	IPA_GROUP_Q6ZIP
+#define IPA_GROUP_UC_RX_Q	(5)
+#define IPA_GROUP_Q6ZIP_ENGINE	IPA_GROUP_UC_RX_Q
+#define IPA_GROUP_MAX		(6)
+
+enum ipa_rsrc_grp_type_src {
+	IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS,
+	IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS,
+	IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER,
+	IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
+	IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+	IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS,
+	IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+	IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+	IPA_RSRC_GRP_TYPE_SRC_MAX,
+};
+enum ipa_rsrc_grp_type_dst {
+	IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS,
+	IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS,
+	IPA_RSRC_GRP_TYPE_DST_DPS_DMARS,
+	IPA_RSRC_GRP_TYPE_DST_MAX,
+};
+enum ipa_rsrc_grp_type_rx {
+	IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ,
+	IPA_RSRC_GRP_TYPE_RX_MAX
+};
+struct rsrc_min_max {
+	u32 min;
+	u32 max;
+};
+
+static const struct rsrc_min_max ipa3_rsrc_src_grp_config
+			[IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = {
+		/*UL	DL	DIAG	DMA	Not Used	uC Rx*/
+	[IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} },
+	[IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+	[IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+	[IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+		{14, 14}, {16, 16}, {5, 5}, {5, 5},  {0, 0}, {8, 8} },
+	[IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} },
+	[IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+	[IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+	[IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
+};
+static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
+			[IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
+		/*UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng*/
+	[IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
+	[IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+	[IPA_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
+};
+static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
+			[IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
+		/*UL	DL	DIAG	DMA	Not Used	uC Rx*/
+	[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
+};
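+
+/*
+ * Each entry in the three resource group tables above is a {min, max} credit
+ * pair (struct rsrc_min_max) for the given resource type; the columns are
+ * indexed by resource group in the order noted in the inline comments (UL,
+ * DL/DPL, DIAG, DMA, not used/Q6zip general, uC Rx/Q6zip engine).
+ */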
+
+enum ipa_ver {
+	IPA_3_0,
+	IPA_VER_MAX,
+};
+
+struct ipa_ep_configuration {
+	int pipe_num;
+	int group_num;
+	bool support_flt;
+	int sequencer_type;
+	u8 qmb_master_sel;
+};
+
+static const struct ipa_ep_configuration ipa3_ep_mapping
+					[IPA_VER_MAX][IPA_CLIENT_MAX] = {
+	[IPA_3_0][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN1_PROD]          = {10, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB_PROD]            = {1, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_UC_USB_PROD]         = {2, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_APPS_LAN_WAN_PROD]   = {14, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]
+			= {22, IPA_GROUP_IMM_CMD, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_ODU_PROD]            = {12, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_MHI_PROD]            = {0, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_Q6_LAN_PROD]         = {9, IPA_GROUP_UL, false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_WAN_PROD]         = {5, IPA_GROUP_DL,
+			true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_CMD_PROD]
+			= {6, IPA_GROUP_IMM_CMD, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD]      = {7, IPA_GROUP_Q6ZIP,
+			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD]     = {8, IPA_GROUP_Q6ZIP,
+			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
+			= {12, IPA_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
+			= {13, IPA_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_PCIE},
+	/* Only for test purpose */
+	[IPA_3_0][IPA_CLIENT_TEST_PROD]           = {1, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST1_PROD]          = {1, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST2_PROD]          = {3, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST3_PROD]          = {12, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST4_PROD]          = {13, IPA_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+
+	[IPA_3_0][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN1_CONS]          = {25, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN2_CONS]          = {27, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN3_CONS]          = {28, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN4_CONS]          = {29, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB_CONS]            = {26, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_USB_DPL_CONS]        = {17, IPA_GROUP_DPL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_APPS_LAN_CONS]       = {15, IPA_GROUP_UL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_APPS_WAN_CONS]       = {16, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_ODU_EMB_CONS]        = {23, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_MHI_CONS]            = {23, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_Q6_LAN_CONS]         = {19, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_WAN_CONS]         = {18, IPA_GROUP_UL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DUN_CONS]         = {30, IPA_GROUP_DIAG,
+			false, IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS]
+			= {21, IPA_GROUP_Q6ZIP, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS]
+			= {4, IPA_GROUP_Q6ZIP, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
+			= {28, IPA_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
+			= {29, IPA_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
+	/* Only for test purpose */
+	[IPA_3_0][IPA_CLIENT_TEST_CONS]           = {26, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST1_CONS]          = {26, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST2_CONS]          = {27, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST3_CONS]          = {28, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST4_CONS]          = {29, IPA_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+};
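+/*
+ * An illustrative reading of one entry above, based on how the fields are
+ * consumed later in this file (pipe_num, group_num, support_flt,
+ * sequencer_type, qmb_master_sel): for example,
+ * [IPA_3_0][IPA_CLIENT_USB_PROD] = {1, IPA_GROUP_UL, true, ...} means
+ * USB_PROD maps to pipe 1 in the UL resource group, supports filtering,
+ * uses the 2ND_PKT_PROCESS_PASS_NO_DEC_UCP sequencer and the DDR QMB master.
+ */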
+
+/* This array holds the per-endpoint GSI information tuples:
+ * {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee}
+ */
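+/*
+ * For example, the first entry below, {0, 0, 8, 16, 0}, reads as: IPA EP 0
+ * uses GSI channel 0, with ipa_if_tlv = 8 and ipa_if_aos = 16, on EE 0
+ * (illustrative decoding of the tuple format above).
+ */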
+static struct ipa_gsi_ep_config ipa_gsi_ep_info[] = {
+	{0, 0, 8, 16, 0},
+	{1, 3, 8, 16, 0},
+	{3, 5, 16, 32, 0},
+	{4, 9, 4, 4, 1},
+	{5, 0, 16, 32, 1},
+	{6, 1, 18, 28, 1},
+	{7, 2, 0, 0, 1},
+	{8, 3, 0, 0, 1},
+	{9, 4, 8, 12, 1},
+	{10, 1, 8, 16, 3},
+	{12, 9, 8, 16, 0},
+	{13, 10, 8, 16, 0},
+	{14, 11, 8, 16, 0},
+	{15, 7, 8, 12, 0},
+	{16, 8, 8, 12, 0},
+	{17, 2, 8, 12, 0},
+	{18, 5, 8, 12, 1},
+	{19, 6, 8, 12, 1},
+	{21, 8, 4, 4, 1},
+	{22, 6, 18, 28, 0},
+	{23, 1, 8, 8, 0},
+	{25, 4, 8, 8, 3},
+	{26, 12, 8, 8, 0},
+	{27, 4, 8, 8, 0},
+	{28, 13, 8, 8, 0},
+	{29, 14, 8, 8, 0},
+	{30, 7, 4, 4, 1},
+	{-1, -1, -1, -1, -1}
+};
+
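+/*
+ * Bus bandwidth votes below follow the msm-bus convention: .ab is the
+ * arbitrated (average) bandwidth and .ib the instantaneous bandwidth,
+ * expressed in bytes per second (informational note only).
+ */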
+static struct msm_bus_vectors ipa_init_vectors_v3_0[]  = {
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 0,
+		.ib = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 0,
+		.ib = 0,
+	},
+};
+
+static struct msm_bus_vectors ipa_nominal_perf_vectors_v3_0[]  = {
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 100000000,
+		.ib = 1300000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 100000000,
+		.ib = 1300000000,
+	},
+};
+
+static struct msm_bus_paths ipa_usecases_v3_0[]  = {
+	{
+		ARRAY_SIZE(ipa_init_vectors_v3_0),
+		ipa_init_vectors_v3_0,
+	},
+	{
+		ARRAY_SIZE(ipa_nominal_perf_vectors_v3_0),
+		ipa_nominal_perf_vectors_v3_0,
+	},
+};
+
+static struct msm_bus_scale_pdata ipa_bus_client_pdata_v3_0 = {
+	ipa_usecases_v3_0,
+	ARRAY_SIZE(ipa_usecases_v3_0),
+	.name = "ipa",
+};
+
+void ipa3_active_clients_lock(void)
+{
+	unsigned long flags;
+
+	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+	ipa3_ctx->ipa3_active_clients.mutex_locked = true;
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+}
+
+int ipa3_active_clients_trylock(unsigned long *flags)
+{
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
+	if (ipa3_ctx->ipa3_active_clients.mutex_locked) {
+		spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock,
+					 *flags);
+		return 0;
+	}
+
+	return 1;
+}
+
+void ipa3_active_clients_trylock_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
+}
+
+void ipa3_active_clients_unlock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+	ipa3_ctx->ipa3_active_clients.mutex_locked = false;
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+}
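+
+/*
+ * Typical usage of the locking helpers above (illustrative sketch only):
+ * sleepable paths bracket their work with ipa3_active_clients_lock() and
+ * ipa3_active_clients_unlock(); atomic paths call
+ * ipa3_active_clients_trylock(), which returns 0 whenever the mutex side
+ * holds the lock, in which case the caller must back off, e.g.:
+ *
+ *	unsigned long flags;
+ *
+ *	if (ipa3_active_clients_trylock(&flags) == 0)
+ *		return -EPERM;
+ *	... fast-path work protected by the spinlock ...
+ *	ipa3_active_clients_trylock_unlock(&flags);
+ */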
+
+/**
+ * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an
+ * IPA_RM resource
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ * @clients: [OUT] Empty array which will contain the list of clients. The
+ *         caller must initialize this array.
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_get_clients_from_rm_resource(
+	enum ipa_rm_resource_name resource,
+	struct ipa3_client_names *clients)
+{
+	int i = 0;
+
+	if (resource < 0 ||
+	    resource >= IPA_RM_RESOURCE_MAX ||
+	    !clients) {
+		IPAERR("Bad parameters\n");
+		return -EINVAL;
+	}
+
+	switch (resource) {
+	case IPA_RM_RESOURCE_USB_CONS:
+		clients->names[i++] = IPA_CLIENT_USB_CONS;
+		break;
+	case IPA_RM_RESOURCE_USB_DPL_CONS:
+		clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
+		break;
+	case IPA_RM_RESOURCE_HSIC_CONS:
+		clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
+		break;
+	case IPA_RM_RESOURCE_WLAN_CONS:
+		clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
+		break;
+	case IPA_RM_RESOURCE_MHI_CONS:
+		clients->names[i++] = IPA_CLIENT_MHI_CONS;
+		break;
+	case IPA_RM_RESOURCE_USB_PROD:
+		clients->names[i++] = IPA_CLIENT_USB_PROD;
+		break;
+	case IPA_RM_RESOURCE_HSIC_PROD:
+		clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
+		break;
+	case IPA_RM_RESOURCE_MHI_PROD:
+		clients->names[i++] = IPA_CLIENT_MHI_PROD;
+		break;
+	default:
+		break;
+	}
+	clients->length = i;
+
+	return 0;
+}
+
+/**
+ * ipa3_should_pipe_be_suspended() - returns true when the client's pipe should
+ * be suspended during a power save scenario. False otherwise.
+ *
+ * @client: [IN] IPA client
+ */
+bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->keep_ipa_awake)
+		return false;
+
+	if (client == IPA_CLIENT_USB_CONS     ||
+	    client == IPA_CLIENT_USB_DPL_CONS ||
+	    client == IPA_CLIENT_MHI_CONS     ||
+	    client == IPA_CLIENT_HSIC1_CONS   ||
+	    client == IPA_CLIENT_WLAN1_CONS   ||
+	    client == IPA_CLIENT_WLAN2_CONS   ||
+	    client == IPA_CLIENT_WLAN3_CONS   ||
+	    client == IPA_CLIENT_WLAN4_CONS)
+		return true;
+
+	return false;
+}
+
+/**
+ * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement the active clients counter, which may result in
+ * gating of the IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+	struct ipa3_client_names clients;
+	int res;
+	int index;
+	struct ipa_ep_cfg_ctrl suspend;
+	enum ipa_client_type client;
+	int ipa_ep_idx;
+	bool pipe_suspended = false;
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa3_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR("Bad params.\n");
+		return res;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		ipa3_ctx->resume_on_connect[client] = false;
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa3_should_pipe_be_suspended(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				/* suspend endpoint */
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = true;
+				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+				pipe_suspended = true;
+			}
+		}
+	}
+	/* Sleep ~1 msec */
+	if (pipe_suspended)
+		usleep_range(1000, 2000);
+
+	/* before gating IPA clocks do TAG process */
+	ipa3_ctx->tag_process_before_gating = true;
+	IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
+
+	return 0;
+}
+
+/**
+ * ipa3_suspend_resource_no_block() - suspend client endpoints related to the
+ * IPA_RM resource and decrement active clients counter. This function is
+ * guaranteed to avoid sleeping.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+	int res;
+	struct ipa3_client_names clients;
+	int index;
+	enum ipa_client_type client;
+	struct ipa_ep_cfg_ctrl suspend;
+	int ipa_ep_idx;
+	unsigned long flags;
+	struct ipa_active_client_logging_info log_info;
+
+	if (ipa3_active_clients_trylock(&flags) == 0)
+		return -EPERM;
+	if (ipa3_ctx->ipa3_active_clients.cnt == 1) {
+		res = -EPERM;
+		goto bail;
+	}
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa3_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR(
+			"ipa3_get_clients_from_rm_resource() failed, name = %d.\n",
+			resource);
+		goto bail;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		ipa3_ctx->resume_on_connect[client] = false;
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa3_should_pipe_be_suspended(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				/* suspend endpoint */
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = true;
+				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+			}
+		}
+	}
+
+	if (res == 0) {
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+				ipa_rm_resource_str(resource));
+		ipa3_active_clients_log_dec(&log_info, true);
+		ipa3_ctx->ipa3_active_clients.cnt--;
+		IPADBG("active clients = %d\n",
+		       ipa3_ctx->ipa3_active_clients.cnt);
+	}
+bail:
+	ipa3_active_clients_trylock_unlock(&flags);
+
+	return res;
+}
+
+/**
+ * ipa3_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_resume_resource(enum ipa_rm_resource_name resource)
+{
+
+	struct ipa3_client_names clients;
+	int res;
+	int index;
+	struct ipa_ep_cfg_ctrl suspend;
+	enum ipa_client_type client;
+	int ipa_ep_idx;
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa3_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR("ipa3_get_clients_from_rm_resource() failed.\n");
+		return res;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		/*
+		 * The related ep, will be resumed on connect
+		 * while its resource is granted
+		 */
+		ipa3_ctx->resume_on_connect[client] = true;
+		IPADBG("%d will be resumed on connect.\n", client);
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa3_should_pipe_be_suspended(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = false;
+				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+			}
+		}
+	}
+
+	return res;
+}
+
+/**
+ * _ipa_sram_settings_read_v3_0() - Read SRAM settings from HW
+ *
+ * Returns:	None
+ */
+void _ipa_sram_settings_read_v3_0(void)
+{
+	struct ipahal_reg_shared_mem_size smem_sz;
+
+	memset(&smem_sz, 0, sizeof(smem_sz));
+
+	ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+
+	ipa3_ctx->smem_restricted_bytes = smem_sz.shared_mem_baddr;
+	ipa3_ctx->smem_sz = smem_sz.shared_mem_sz;
+
+	/* reg fields are in 8B units */
+	ipa3_ctx->smem_restricted_bytes *= 8;
+	ipa3_ctx->smem_sz *= 8;
+	ipa3_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+	ipa3_ctx->hdr_tbl_lcl = 0;
+	ipa3_ctx->hdr_proc_ctx_tbl_lcl = 1;
+
+	/*
+	 * When the proc ctx table is located in internal memory,
+	 * modem entries reside first.
+	 */
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+		ipa3_ctx->hdr_proc_ctx_tbl.start_offset =
+			IPA_MEM_PART(modem_hdr_proc_ctx_size);
+	}
+	ipa3_ctx->ip4_rt_tbl_hash_lcl = 0;
+	ipa3_ctx->ip4_rt_tbl_nhash_lcl = 0;
+	ipa3_ctx->ip6_rt_tbl_hash_lcl = 0;
+	ipa3_ctx->ip6_rt_tbl_nhash_lcl = 0;
+	ipa3_ctx->ip4_flt_tbl_hash_lcl = 0;
+	ipa3_ctx->ip4_flt_tbl_nhash_lcl = 0;
+	ipa3_ctx->ip6_flt_tbl_hash_lcl = 0;
+	ipa3_ctx->ip6_flt_tbl_nhash_lcl = 0;
+}
+
+/**
+ * ipa3_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa3_cfg_route(struct ipahal_reg_route *route)
+{
+
+	IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
+		route->route_dis,
+		route->route_def_pipe,
+		route->route_def_hdr_table);
+	IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
+		route->route_def_hdr_ofst,
+		route->route_frag_def_pipe);
+
+	IPADBG("default_retain_hdr=%d\n",
+		route->route_def_retain_hdr);
+
+	if (route->route_dis) {
+		IPAERR("Route disable is not supported!\n");
+		return -EPERM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipahal_write_reg_fields(IPA_ROUTE, route);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa3_cfg_filter(u32 disable)
+{
+	IPAERR("Filter disable is not supported!\n");
+	return -EPERM;
+}
+
+/**
+ * ipa3_cfg_qsb() - Configure IPA QSB maximal reads and writes
+ *
+ * Returns:	None
+ */
+void ipa3_cfg_qsb(void)
+{
+	int qsb_max_writes[2] = { 8, 2 };
+	int qsb_max_reads[2] = { 8, 8 };
+
+	ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, qsb_max_writes);
+	ipahal_write_reg_fields(IPA_QSB_MAX_READS, qsb_max_reads);
+}
+
+/**
+ * ipa3_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa3_init_hw(void)
+{
+	u32 ipa_version = 0;
+	u32 val;
+
+	/* Read IPA version and make sure we have access to the registers */
+	ipa_version = ipahal_read_reg(IPA_VERSION);
+	if (ipa_version == 0)
+		return -EFAULT;
+
+	switch (ipa3_ctx->ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+		val = IPA_BCR_REG_VAL_v3_0;
+		break;
+	case IPA_HW_v3_5:
+	case IPA_HW_v3_5_1:
+		val = IPA_BCR_REG_VAL_v3_5;
+		break;
+	default:
+		IPAERR("unknown HW type in dts\n");
+		return -EFAULT;
+	}
+
+	ipahal_write_reg(IPA_BCR, val);
+
+	ipa3_cfg_qsb();
+
+	return 0;
+}
+
+/**
+ * ipa3_get_hw_type_index() - Get the HW type index used as the first index
+ *	into the ipa3_ep_mapping[] array.
+ *
+ * Return value: HW type index
+ */
+u8 ipa3_get_hw_type_index(void)
+{
+	u8 hw_type_index;
+
+	switch (ipa3_ctx->ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+		hw_type_index = IPA_3_0;
+		break;
+	default:
+		IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
+		hw_type_index = IPA_3_0;
+		break;
+	}
+
+	return hw_type_index;
+}
+
+/**
+ * ipa3_get_ep_mapping() - provide endpoint mapping
+ * @client: client type
+ *
+ * Return value: endpoint (pipe) index, negative on failure
+ */
+int ipa3_get_ep_mapping(enum ipa_client_type client)
+{
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR("Bad client number! client =%d\n", client);
+		return -EINVAL;
+	}
+
+	return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].pipe_num;
+}
+
+/**
+ * ipa3_get_gsi_ep_info() - provide gsi ep information
+ * @ipa_ep_idx: IPA endpoint index
+ *
+ * Return value: pointer to ipa_gsi_ep_info
+ */
+struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx)
+{
+	int i;
+
+	for (i = 0; ; i++) {
+		if (ipa_gsi_ep_info[i].ipa_ep_num < 0)
+			break;
+
+		if (ipa_gsi_ep_info[i].ipa_ep_num ==
+			ipa_ep_idx)
+			return &(ipa_gsi_ep_info[i]);
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa_get_ep_group() - provide endpoint group by client
+ * @client: client type
+ *
+ * Return value: endpoint group
+ */
+int ipa_get_ep_group(enum ipa_client_type client)
+{
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR("Bad client number! client =%d\n", client);
+		return -EINVAL;
+	}
+
+	return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].group_num;
+}
+
+/**
+ * ipa3_get_qmb_master_sel() - provide QMB master selection for the client
+ * @client: client type
+ *
+ * Return value: QMB master index
+ */
+u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
+{
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR("Bad client number! client =%d\n", client);
+		return -EINVAL;
+	}
+
+	return ipa3_ep_mapping[ipa3_get_hw_type_index()]
+		[client].qmb_master_sel;
+}
+
+/**
+ * ipa3_set_client() - store the IPACM client mapping for a pipe
+ * @index: pipe index
+ * @client: IPACM client type
+ * @uplink: true if the pipe carries uplink traffic
+ *
+ * Return value: none
+ */
+void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
+{
+	if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
+		IPAERR("Bad client number! client =%d\n", client);
+	} else if (index >= IPA3_MAX_NUM_PIPES || index < 0) {
+		IPAERR("Bad pipe index! index =%d\n", index);
+	} else {
+		ipa3_ctx->ipacm_client[index].client_enum = client;
+		ipa3_ctx->ipacm_client[index].uplink = uplink;
+	}
+}
+
+/**
+ * ipa3_get_client() - provide the IPACM client mapping for a pipe
+ * @pipe_idx: pipe index
+ *
+ * Return value: IPACM client type, or IPACM_CLIENT_MAX for a bad pipe index
+ */
+enum ipacm_client_enum ipa3_get_client(int pipe_idx)
+{
+	if (pipe_idx >= IPA3_MAX_NUM_PIPES || pipe_idx < 0) {
+		IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
+		return IPACM_CLIENT_MAX;
+	} else {
+		return ipa3_ctx->ipacm_client[pipe_idx].client_enum;
+	}
+}
+
+/**
+ * ipa3_get_client_uplink() - report whether a pipe is mapped as uplink
+ * @pipe_idx: pipe index
+ *
+ * Return value: true if the pipe carries uplink traffic, false otherwise
+ */
+bool ipa3_get_client_uplink(int pipe_idx)
+{
+	return ipa3_ctx->ipacm_client[pipe_idx].uplink;
+}
+
+/**
+ * ipa3_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
+ * the supplied pipe index.
+ *
+ * @pipe_idx: IPA pipe index
+ *
+ * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
+ * found.
+ */
+enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx)
+{
+	int i;
+	int j;
+	enum ipa_client_type client;
+	struct ipa3_client_names clients;
+	bool found = false;
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return -EINVAL;
+	}
+
+	client = ipa3_ctx->ep[pipe_idx].client;
+
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+		memset(&clients, 0, sizeof(clients));
+		ipa3_get_clients_from_rm_resource(i, &clients);
+		for (j = 0; j < clients.length; j++) {
+			if (clients.names[j] == client) {
+				found = true;
+				break;
+			}
+		}
+		if (found)
+			break;
+	}
+
+	if (!found)
+		return -EFAULT;
+
+	return i;
+}
+
+/**
+ * ipa3_get_client_mapping() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client mapping
+ */
+enum ipa_client_type ipa3_get_client_mapping(int pipe_idx)
+{
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return -EINVAL;
+	}
+
+	return ipa3_ctx->ep[pipe_idx].client;
+}
+
+/**
+ * ipa_init_ep_flt_bitmap() - Initialize the bitmap
+ * that represents the End-points that supports filtering
+ */
+void ipa_init_ep_flt_bitmap(void)
+{
+	enum ipa_client_type cl;
+	u8 hw_type_idx = ipa3_get_hw_type_index();
+	u32 bitmap;
+
+	bitmap = 0;
+
+	BUG_ON(ipa3_ctx->ep_flt_bitmap);
+
+	for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) {
+		if (ipa3_ep_mapping[hw_type_idx][cl].support_flt) {
+			bitmap |=
+				(1U<<ipa3_ep_mapping[hw_type_idx][cl].pipe_num);
+			if (bitmap != ipa3_ctx->ep_flt_bitmap) {
+				ipa3_ctx->ep_flt_bitmap = bitmap;
+				ipa3_ctx->ep_flt_num++;
+			}
+		}
+	}
+}
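+
+/*
+ * For example (illustrative only): if the pipes supporting filtering in the
+ * mapping table are 0, 1 and 14, the loop above leaves ep_flt_bitmap as
+ * (1U << 0) | (1U << 1) | (1U << 14) and ep_flt_num as 3.
+ */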
+
+/**
+ * ipa_is_ep_support_flt() - Given an End-point check
+ * whether it supports filtering or not.
+ *
+ * @pipe_idx: IPA pipe index
+ *
+ * Return values:
+ * true if supports and false if not
+ */
+bool ipa_is_ep_support_flt(int pipe_idx)
+{
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return false;
+	}
+
+	return ipa3_ctx->ep_flt_bitmap & (1U<<pipe_idx);
+}
+
+/**
+ * ipa3_cfg_ep_seq() - IPA end-point HPS/DPS sequencer type configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @seq_cfg:	[in] HPS/DPS sequencer configuration
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg)
+{
+	int type;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad param, clnt_hdl = %d", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("SEQ does not apply to IPA consumer EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	/*
+	 * Skip configuring the sequencer type for test clients.
+	 * These are configured dynamically in ipa3_cfg_ep_mode().
+	 */
+	if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPADBG("Skip sequencers configuration for test clients\n");
+		return 0;
+	}
+
+	if (seq_cfg->set_dynamic)
+		type = seq_cfg->seq_type;
+	else
+		type = ipa3_ep_mapping[ipa3_get_hw_type_index()]
+			[ipa3_ctx->ep[clnt_hdl].client].sequencer_type;
+
+	if (type != IPA_DPS_HPS_SEQ_TYPE_INVALID) {
+		if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA &&
+			!IPA_DPS_HPS_SEQ_TYPE_IS_DMA(type)) {
+			IPAERR("Configuring non-DMA SEQ type to DMA pipe\n");
+			BUG();
+		}
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+		/* Configure sequencer type */
+
+		IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
+				clnt_hdl);
+		ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
+
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	} else {
+		IPADBG("should not set sequencer type of ep = %d\n", clnt_hdl);
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep - IPA end-point configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a one
+ * shot API to configure the IPA end-point fully
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	int result = -EINVAL;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	result = ipa3_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+	if (result)
+		return result;
+
+	result = ipa3_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
+	if (result)
+		return result;
+
+	result = ipa3_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+	if (result)
+		return result;
+
+	result = ipa3_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
+	if (result)
+		return result;
+
+	if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
+		result = ipa3_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_seq(clnt_hdl, &ipa_ep_cfg->seq);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
+		if (result)
+			return result;
+	} else {
+		result = ipa3_cfg_ep_metadata_mask(clnt_hdl,
+				&ipa_ep_cfg->metadata_mask);
+		if (result)
+			return result;
+	}
+
+	return 0;
+}
+
+const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en)
+{
+	switch (nat_en) {
+	case (IPA_BYPASS_NAT):
+		return "NAT disabled";
+	case (IPA_SRC_NAT):
+		return "Source NAT";
+	case (IPA_DST_NAT):
+		return "Dst NAT";
+	}
+
+	return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, nat_en=%d(%s)\n",
+			clnt_hdl,
+			ep_nat->nat_en,
+			ipa3_get_nat_en_str(ep_nat->nat_en));
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_NAT_n, clnt_hdl, ep_nat);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+
+/**
+ * ipa3_cfg_ep_status() - IPA end-point status configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_status(u32 clnt_hdl,
+	const struct ipahal_reg_ep_cfg_status *ep_status)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, status_en=%d status_ep=%d status_location=%d\n",
+			clnt_hdl,
+			ep_status->status_en,
+			ep_status->status_ep,
+			ep_status->status_location);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].status = *ep_status;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, clnt_hdl, ep_status);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_cfg() - IPA end-point cfg configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
+{
+	u8 qmb_master_sel;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
+
+	/* Override QMB master selection */
+	qmb_master_sel = ipa3_get_qmb_master_sel(ipa3_ctx->ep[clnt_hdl].client);
+	ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel = qmb_master_sel;
+	IPADBG(
+	       "pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d gen_qmb_master_sel=%d\n",
+			clnt_hdl,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.frag_offload_en,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_offload_en,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_metadata_hdr_offset,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel);
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_CFG_n, clnt_hdl,
+				  &ipa3_ctx->ep[clnt_hdl].cfg.cfg);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask
+		*metadata_mask)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, metadata_mask=0x%x\n",
+			clnt_hdl,
+			metadata_mask->metadata_mask);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+		clnt_hdl, metadata_mask);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_hdr() -  IPA end-point header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+	IPADBG("pipe=%d metadata_reg_valid=%d\n",
+		clnt_hdl,
+		ep_hdr->hdr_metadata_reg_valid);
+
+	IPADBG("remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
+		ep_hdr->hdr_remove_additional,
+		ep_hdr->hdr_a5_mux,
+		ep_hdr->hdr_ofst_pkt_size);
+
+	IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
+		ep_hdr->hdr_ofst_pkt_size_valid,
+		ep_hdr->hdr_additional_const_len);
+
+	IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x\n",
+		ep_hdr->hdr_ofst_metadata,
+		ep_hdr->hdr_ofst_metadata_valid,
+		ep_hdr->hdr_len);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr = *ep_hdr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, &ep->cfg.hdr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_hdr_ext() -  IPA end-point extended header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_hdr_ext:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
+		       const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
+		clnt_hdl,
+		ep_hdr_ext->hdr_pad_to_alignment);
+
+	IPADBG("hdr_total_len_or_pad_offset=%d\n",
+		ep_hdr_ext->hdr_total_len_or_pad_offset);
+
+	IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
+		ep_hdr_ext->hdr_payload_len_inc_padding,
+		ep_hdr_ext->hdr_total_len_or_pad);
+
+	IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
+		ep_hdr_ext->hdr_total_len_or_pad_valid,
+		ep_hdr_ext->hdr_little_endian);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr_ext = *ep_hdr_ext;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_EXT_n, clnt_hdl,
+		&ep->cfg.hdr_ext);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_ctrl() -  IPA end-point Control configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg_ctrl:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ep_ctrl == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
+		clnt_hdl,
+		ep_ctrl->ipa_ep_suspend,
+		ep_ctrl->ipa_ep_delay);
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, ep_ctrl);
+
+	if (ep_ctrl->ipa_ep_suspend == true &&
+			IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client))
+		ipa3_suspend_active_aggr_wa(clnt_hdl);
+
+	return 0;
+}
+
+const char *ipa3_get_mode_type_str(enum ipa_mode_type mode)
+{
+	switch (mode) {
+	case (IPA_BASIC):
+		return "Basic";
+	case (IPA_ENABLE_FRAMING_HDLC):
+		return "HDLC framing";
+	case (IPA_ENABLE_DEFRAMING_HDLC):
+		return "HDLC de-framing";
+	case (IPA_DMA):
+		return "DMA";
+	}
+
+	return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
+{
+	int ep;
+	int type;
+	struct ipahal_reg_endp_init_mode init_mode;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
+		IPAERR("bad params clnt_hdl=%d , ep_valid=%d ep_mode=%p\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid,
+				ep_mode);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ep = ipa3_get_ep_mapping(ep_mode->dst);
+	if (ep == -1 && ep_mode->mode == IPA_DMA) {
+		IPAERR("dst %d does not exist in DMA mode\n", ep_mode->dst);
+		return -EINVAL;
+	}
+
+	WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
+
+	if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
+		ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+
+	IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d\n",
+			clnt_hdl,
+			ep_mode->mode,
+			ipa3_get_mode_type_str(ep_mode->mode),
+			ep_mode->dst);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
+	ipa3_ctx->ep[clnt_hdl].dst_pipe_index = ep;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	init_mode.dst_pipe_number = ipa3_ctx->ep[clnt_hdl].dst_pipe_index;
+	init_mode.ep_mode = *ep_mode;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_MODE_n, clnt_hdl, &init_mode);
+
+	/* Configure sequencer type for test clients */
+	if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
+		if (ep_mode->mode == IPA_DMA)
+			type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY;
+		else
+			type = IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP;
+
+		IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
+				clnt_hdl);
+		ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+const char *ipa3_get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
+{
+	switch (aggr_en) {
+	case (IPA_BYPASS_AGGR):
+			return "no aggregation";
+	case (IPA_ENABLE_AGGR):
+			return "aggregation enabled";
+	case (IPA_ENABLE_DEAGGR):
+		return "de-aggregation enabled";
+	}
+
+	return "undefined";
+}
+
+const char *ipa3_get_aggr_type_str(enum ipa_aggr_type aggr_type)
+{
+	switch (aggr_type) {
+	case (IPA_MBIM_16):
+			return "MBIM_16";
+	case (IPA_HDLC):
+		return "HDLC";
+	case (IPA_TLP):
+			return "TLP";
+	case (IPA_RNDIS):
+			return "RNDIS";
+	case (IPA_GENERIC):
+			return "GENERIC";
+	case (IPA_QCMAP):
+			return "QCMAP";
+	}
+	return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+			clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (ep_aggr->aggr_en == IPA_ENABLE_DEAGGR &&
+	    !IPA_EP_SUPPORTS_DEAGGR(clnt_hdl)) {
+		IPAERR("pipe=%d cannot be configured to DEAGGR\n", clnt_hdl);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
+			clnt_hdl,
+			ep_aggr->aggr_en,
+			ipa3_get_aggr_enable_str(ep_aggr->aggr_en),
+			ep_aggr->aggr,
+			ipa3_get_aggr_type_str(ep_aggr->aggr),
+			ep_aggr->aggr_byte_limit,
+			ep_aggr->aggr_time_limit);
+	IPADBG("hard_byte_limit_en=%d aggr_sw_eof_active=%d\n",
+		ep_aggr->aggr_hard_byte_limit_en,
+		ep_aggr->aggr_sw_eof_active);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, ep_aggr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
+{
+	struct ipahal_reg_endp_init_route init_rt;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+			clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("ROUTE does not apply to IPA out EP %d\n",
+				clnt_hdl);
+		return -EINVAL;
+	}
+
+	/*
+	 * if DMA mode was configured previously for this EP, return with
+	 * success
+	 */
+	if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+		IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
+				clnt_hdl);
+		return 0;
+	}
+
+	if (ep_route->rt_tbl_hdl)
+		IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+	IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
+			clnt_hdl,
+			ep_route->rt_tbl_hdl);
+
+	/* always use "default" routing table when programming EP ROUTE reg */
+	ipa3_ctx->ep[clnt_hdl].rt_tbl_idx =
+		IPA_MEM_PART(v4_apps_rt_index_lo);
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, clnt_hdl, &init_rt);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_holb() - IPA end-point holb configuration
+ *
+ * If an IPA producer pipe is full, IPA HW by default will block
+ * indefinitely till space opens up. During this time no packets
+ * including those from unrelated pipes will be processed. Enabling
+ * HOLB means IPA HW will be allowed to drop packets as/when needed
+ * and indefinite blocking is avoided.
+ *
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
+	    ep_holb->tmr_val > ipa3_ctx->ctrl->max_holb_tmr_val ||
+	    ep_holb->en > 1) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ipa3_ctx->ep[clnt_hdl].holb = *ep_holb;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl,
+		ep_holb);
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, clnt_hdl,
+		ep_holb);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
+				ep_holb->tmr_val);
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_holb_by_client() - IPA end-point holb configuration
+ *
+ * Wrapper function for ipa3_cfg_ep_holb() that takes a client name instead
+ * of a client handle. This function is used for clients that do not have a
+ * client handle.
+ *
+ * @client:	[in] client name
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ep_holb)
+{
+	return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb);
+}
+
+/**
+ * ipa3_cfg_ep_deaggr() -  IPA end-point deaggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_deaggr:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
+			const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d deaggr_hdr_len=%d\n",
+		clnt_hdl,
+		ep_deaggr->deaggr_hdr_len);
+
+	IPADBG("packet_offset_valid=%d\n",
+		ep_deaggr->packet_offset_valid);
+
+	IPADBG("packet_offset_location=%d max_packet_len=%d\n",
+		ep_deaggr->packet_offset_location,
+		ep_deaggr->max_packet_len);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.deaggr = *ep_deaggr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_DEAGGR_n, clnt_hdl,
+		&ep->cfg.deaggr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_metadata() - IPA end-point metadata configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
+{
+	u32 qmap_id = 0;
+	struct ipa_ep_cfg_metadata ep_md_reg_wrt;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ep_md_reg_wrt = *ep_md;
+	qmap_id = (ep_md->qmap_id <<
+		IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) &
+		IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK;
+
+	ep_md_reg_wrt.qmap_id = qmap_id;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl,
+		&ep_md_reg_wrt);
+	ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl,
+		&ipa3_ctx->ep[clnt_hdl].cfg.hdr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+	struct ipa_ep_cfg_metadata meta;
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+
+	if (param_in->client  >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm client:%d\n", param_in->client);
+		goto fail;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (!ep->valid) {
+		IPAERR("EP not allocated.\n");
+		goto fail;
+	}
+
+	meta.qmap_id = param_in->qmap_id;
+	if (param_in->client == IPA_CLIENT_USB_PROD ||
+	    param_in->client == IPA_CLIENT_HSIC1_PROD ||
+	    param_in->client == IPA_CLIENT_ODU_PROD) {
+		result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
+	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
+		ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
+		result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
+		if (result)
+			IPAERR("qmap_id %d write failed on ep=%d\n",
+					meta.qmap_id, ipa_ep_idx);
+		result = 0;
+	}
+
+fail:
+	return result;
+}
+
+/**
+ * ipa3_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+	int i;
+	u32 *cur = (u32 *)base;
+	u8 *byt;
+
+	IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
+	for (i = 0; i < size / 4; i++) {
+		byt = (u8 *)(cur + i);
+		IPADBG("%2d %08x   %02x %02x %02x %02x\n", i, *(cur + i),
+				byt[0], byt[1], byt[2], byt[3]);
+	}
+	IPADBG("END\n");
+}
+
+/**
+ * ipa3_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa3_pipe_mem_init(u32 start_ofst, u32 size)
+{
+	int res;
+	u32 aligned_start_ofst;
+	u32 aligned_size;
+	struct gen_pool *pool;
+
+	if (!size) {
+		IPAERR("no IPA pipe memory allocated\n");
+		goto fail;
+	}
+
+	aligned_start_ofst = IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst);
+	aligned_size = size - (aligned_start_ofst - start_ofst);
+
+	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+	       start_ofst, aligned_start_ofst, size, aligned_size);
+
+	/* allocation order of 8 i.e. 128 bytes, global pool */
+	pool = gen_pool_create(8, -1);
+	if (!pool) {
+		IPAERR("Failed to create a new memory pool.\n");
+		goto fail;
+	}
+
+	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+	if (res) {
+		IPAERR("Failed to add memory to IPA pipe pool\n");
+		goto err_pool_add;
+	}
+
+	ipa3_ctx->pipe_mem_pool = pool;
+	return 0;
+
+err_pool_add:
+	gen_pool_destroy(pool);
+fail:
+	return -ENOMEM;
+}
+
+/**
+ * ipa3_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa3_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+	u32 vaddr;
+	int res = -1;
+
+	if (!ipa3_ctx->pipe_mem_pool || !size) {
+		IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+				ipa3_ctx->pipe_mem_pool);
+		return res;
+	}
+
+	vaddr = gen_pool_alloc(ipa3_ctx->pipe_mem_pool, size);
+
+	if (vaddr) {
+		*ofst = vaddr;
+		res = 0;
+		IPADBG("size=%u ofst=%u\n", size, vaddr);
+	} else {
+		IPAERR("size=%u failed\n", size);
+	}
+
+	return res;
+}
+
+/**
+ * ipa3_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa3_pipe_mem_free(u32 ofst, u32 size)
+{
+	IPADBG("size=%u ofst=%u\n", size, ofst);
+	if (ipa3_ctx->pipe_mem_pool && size)
+		gen_pool_free(ipa3_ctx->pipe_mem_pool, ofst, size);
+	return 0;
+}
+
+/**
+ * ipa3_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode:	[in] the desired aggregation mode, e.g. straight MBIM, QCNCM,
+ * etc.
+ *
+ * Returns:	0 on success
+ */
+int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	struct ipahal_reg_qcncm qcncm;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+	qcncm.mode_en = mode;
+	ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_set_qcncm_ndp_sig(char sig[3])
+{
+	struct ipahal_reg_qcncm qcncm;
+
+	if (sig == NULL) {
+		IPAERR("bad argument for ipa3_set_qcncm_ndp_sig\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+	qcncm.mode_val = ((sig[0] << 16) | (sig[1] << 8) | sig[2]);
+	ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
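+
+/*
+ * For example (illustrative only): passing sig = "QND" programs mode_val to
+ * ('Q' << 16) | ('N' << 8) | 'D' = 0x514e44; the fourth byte ('P') is added
+ * via the header addition mechanism as noted above.
+ */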
+
+/**
+ * ipa3_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable:	[in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa3_set_single_ndp_per_mbim(bool enable)
+{
+	struct ipahal_reg_single_ndp_mode mode;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+	mode.single_ndp_en = enable;
+	ipahal_write_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_straddle_boundary() - Checks whether a memory buffer straddles a
+ * boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+	u32 next_start;
+	u32 prev_end;
+
+	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
+	next_start = (start + (boundary - 1)) & ~(boundary - 1);
+	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+	while (next_start < prev_end)
+		next_start += boundary;
+
+	if (next_start == prev_end)
+		return 1;
+	else
+		return 0;
+}
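+
+/*
+ * Worked example (illustrative only): with boundary = 256, the buffer
+ * [start = 100, end = 300] straddles the boundary at 256, so the function
+ * returns 1, while [start = 300, end = 400] lies entirely between 256 and
+ * 512 and returns 0.
+ */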
+
+/**
+ * ipa3_bam_reg_dump() - Dump selected BAM registers for IPA.
+ * The API is currently used only to dump IPA registers towards USB.
+ *
+ * Function is rate limited to avoid flooding kernel log buffer
+ */
+void ipa3_bam_reg_dump(void)
+{
+	static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
+
+	if (__ratelimit(&_rs)) {
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		pr_err("IPA BAM START\n");
+		sps_get_bam_debug_info(ipa3_ctx->bam_handle, 93,
+			(SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS))
+			|
+			SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD))),
+			0, 2);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	}
+}
+
+/**
+ * ipa3_init_mem_partition() - Reads IPA memory map from DTS, performs alignment
+ * checks and logs the fetched values.
+ *
+ * Returns:	0 on success
+ */
+int ipa3_init_mem_partition(struct device_node *node)
+{
+	int result;
+
+	IPADBG("Reading from DTS as u32 array\n");
+	result = of_property_read_u32_array(node,
+		"qcom,ipa-ram-mmap", (u32 *)&ipa3_ctx->ctrl->mem_partition,
+		sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32));
+
+	if (result) {
+		IPAERR("Read operation failed\n");
+		return -ENODEV;
+	}
+
+	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
+		IPA_MEM_PART(nat_size));
+
+	if (IPA_MEM_PART(uc_info_ofst) & 3) {
+		IPAERR("UC INFO OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(uc_info_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(uc_info_ofst), IPA_MEM_PART(uc_info_size));
+
+	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+	if (IPA_MEM_PART(v4_flt_hash_ofst) & 7) {
+		IPAERR("V4 FLT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_flt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_flt_hash_ofst),
+		IPA_MEM_PART(v4_flt_hash_size),
+		IPA_MEM_PART(v4_flt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v4_flt_nhash_ofst) & 7) {
+		IPAERR("V4 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_flt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_flt_nhash_ofst),
+		IPA_MEM_PART(v4_flt_nhash_size),
+		IPA_MEM_PART(v4_flt_nhash_size_ddr));
+
+	if (IPA_MEM_PART(v6_flt_hash_ofst) & 7) {
+		IPAERR("V6 FLT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_flt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_flt_hash_ofst), IPA_MEM_PART(v6_flt_hash_size),
+		IPA_MEM_PART(v6_flt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v6_flt_nhash_ofst) & 7) {
+		IPAERR("V6 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_flt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_flt_nhash_ofst),
+		IPA_MEM_PART(v6_flt_nhash_size),
+		IPA_MEM_PART(v6_flt_nhash_size_ddr));
+
+	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_rt_num_index));
+
+	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_modem_rt_index_lo),
+		IPA_MEM_PART(v4_modem_rt_index_hi));
+
+	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_apps_rt_index_lo),
+		IPA_MEM_PART(v4_apps_rt_index_hi));
+
+	if (IPA_MEM_PART(v4_rt_hash_ofst) & 7) {
+		IPAERR("V4 RT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_rt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v4_rt_hash_ofst));
+
+	IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_rt_hash_size),
+		IPA_MEM_PART(v4_rt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v4_rt_nhash_ofst) & 7) {
+		IPAERR("V4 RT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_rt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 RT NON-HASHABLE OFST 0x%x\n",
+		IPA_MEM_PART(v4_rt_nhash_ofst));
+
+	IPADBG("V4 RT NON-HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_rt_nhash_size),
+		IPA_MEM_PART(v4_rt_nhash_size_ddr));
+
+	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_rt_num_index));
+
+	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_modem_rt_index_lo),
+		IPA_MEM_PART(v6_modem_rt_index_hi));
+
+	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_apps_rt_index_lo),
+		IPA_MEM_PART(v6_apps_rt_index_hi));
+
+	if (IPA_MEM_PART(v6_rt_hash_ofst) & 7) {
+		IPAERR("V6 RT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_rt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v6_rt_hash_ofst));
+
+	IPADBG("V6 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_rt_hash_size),
+		IPA_MEM_PART(v6_rt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v6_rt_nhash_ofst) & 7) {
+		IPAERR("V6 RT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_rt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 RT NON-HASHABLE OFST 0x%x\n",
+		IPA_MEM_PART(v6_rt_nhash_ofst));
+
+	IPADBG("V6 RT NON-HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_rt_nhash_size),
+		IPA_MEM_PART(v6_rt_nhash_size_ddr));
+
+	if (IPA_MEM_PART(modem_hdr_ofst) & 7) {
+		IPAERR("MODEM HDR OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(modem_hdr_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+	if (IPA_MEM_PART(apps_hdr_ofst) & 7) {
+		IPAERR("APPS HDR OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(apps_hdr_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+		IPA_MEM_PART(apps_hdr_size_ddr));
+
+	if (IPA_MEM_PART(modem_hdr_proc_ctx_ofst) & 7) {
+		IPAERR("MODEM HDR PROC CTX OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
+		IPA_MEM_PART(modem_hdr_proc_ctx_size));
+
+	if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) & 7) {
+		IPAERR("APPS HDR PROC CTX OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(apps_hdr_proc_ctx_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
+		IPA_MEM_PART(apps_hdr_proc_ctx_size),
+		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
+
+	if (IPA_MEM_PART(modem_ofst) & 7) {
+		IPAERR("MODEM OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(modem_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+		IPA_MEM_PART(modem_size));
+
+	IPADBG("V4 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_flt_hash_ofst),
+		IPA_MEM_PART(apps_v4_flt_hash_size));
+
+	IPADBG("V4 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_flt_nhash_ofst),
+		IPA_MEM_PART(apps_v4_flt_nhash_size));
+
+	IPADBG("V6 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_flt_hash_ofst),
+		IPA_MEM_PART(apps_v6_flt_hash_size));
+
+	IPADBG("V6 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_flt_nhash_ofst),
+		IPA_MEM_PART(apps_v6_flt_nhash_size));
+
+	IPADBG("RAM END OFST 0x%x\n",
+		IPA_MEM_PART(end_ofst));
+
+	IPADBG("V4 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_rt_hash_ofst),
+		IPA_MEM_PART(apps_v4_rt_hash_size));
+
+	IPADBG("V4 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_rt_nhash_ofst),
+		IPA_MEM_PART(apps_v4_rt_nhash_size));
+
+	IPADBG("V6 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_rt_hash_ofst),
+		IPA_MEM_PART(apps_v6_rt_hash_size));
+
+	IPADBG("V6 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_rt_nhash_ofst),
+		IPA_MEM_PART(apps_v6_rt_nhash_size));
+
+	return 0;
+}
+
+/**
+ * ipa3_controller_static_bind() - set the appropriate methods for the
+ *  IPA driver based on the HW version
+ *
+ *  @ctrl: data structure which holds the function pointers
+ *  @hw_type: the HW type in use
+ *
+ *  The runtime assignment could be avoided by using C99 designated
+ *  struct initializers; this is a time vs. memory trade-off.
+ */
+int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
+		enum ipa_hw_type hw_type)
+{
+	ctrl->ipa_init_rt4 = _ipa_init_rt4_v3;
+	ctrl->ipa_init_rt6 = _ipa_init_rt6_v3;
+	ctrl->ipa_init_flt4 = _ipa_init_flt4_v3;
+	ctrl->ipa_init_flt6 = _ipa_init_flt6_v3;
+	ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
+	ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
+	ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
+	ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0;
+	ctrl->ipa3_commit_flt = __ipa_commit_flt_v3;
+	ctrl->ipa3_commit_rt = __ipa_commit_rt_v3;
+	ctrl->ipa3_commit_hdr = __ipa_commit_hdr_v3_0;
+	ctrl->ipa3_enable_clks = _ipa_enable_clks_v3_0;
+	ctrl->ipa3_disable_clks = _ipa_disable_clks_v3_0;
+	ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v3_0;
+	ctrl->clock_scaling_bw_threshold_nominal =
+		IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS;
+	ctrl->clock_scaling_bw_threshold_turbo =
+		IPA_V3_0_BW_THRESHOLD_TURBO_MBPS;
+	ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
+	ctrl->ipa_init_sram = _ipa_init_sram_v3_0;
+	ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
+
+	ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
+
+	return 0;
+}
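+/*
+ * Illustration only (not part of the driver): a hypothetical designated-
+ * initializer alternative to the runtime binding above might look like
+ *
+ *	static const struct ipa3_controller ipa3_0_ctrl = {
+ *		.ipa_init_rt4 = _ipa_init_rt4_v3,
+ *		.ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO,
+ *	};
+ *
+ * trading one static table per HW type (more data memory) for avoiding
+ * the per-boot assignments.
+ */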
+
+void ipa3_skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+
+int ipa3_alloc_rule_id(struct idr *rule_ids)
+{
+	/* There are two groups of rule IDs: modem ones and apps ones.
+	 * They are distinguished by the high bit: modem IDs have it set.
+	 */
+	return idr_alloc(rule_ids, NULL,
+		ipahal_get_low_rule_id(), ipahal_get_rule_id_hi_bit(),
+		GFP_KERNEL);
+}
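+/*
+ * Example (values are illustrative and come from ipahal at runtime): if
+ * ipahal_get_low_rule_id() returned 1 and ipahal_get_rule_id_hi_bit()
+ * returned 0x200, apps rule IDs would be allocated from [1, 0x200) while
+ * modem rule IDs would occupy the range with that high bit set.
+ */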
+
+int ipa3_id_alloc(void *ptr)
+{
+	int id;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&ipa3_ctx->idr_lock);
+	id = idr_alloc(&ipa3_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
+	spin_unlock(&ipa3_ctx->idr_lock);
+	idr_preload_end();
+
+	return id;
+}
+
+void *ipa3_id_find(u32 id)
+{
+	void *ptr;
+
+	spin_lock(&ipa3_ctx->idr_lock);
+	ptr = idr_find(&ipa3_ctx->ipa_idr, id);
+	spin_unlock(&ipa3_ctx->idr_lock);
+
+	return ptr;
+}
+
+void ipa3_id_remove(u32 id)
+{
+	spin_lock(&ipa3_ctx->idr_lock);
+	idr_remove(&ipa3_ctx->ipa_idr, id);
+	spin_unlock(&ipa3_ctx->idr_lock);
+}
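+/*
+ * Typical lifecycle of the ID wrappers above (sketch): a caller registers
+ * an object with id = ipa3_id_alloc(obj), resolves it later with
+ * ipa3_id_find(id) and drops the mapping with ipa3_id_remove(id). The idr
+ * only stores the pointer; freeing the object remains the caller's job.
+ */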
+
+void ipa3_tag_destroy_imm(void *user1, int user2)
+{
+	ipahal_destroy_imm_cmd(user1);
+}
+
+static void ipa3_tag_free_skb(void *user1, int user2)
+{
+	dev_kfree_skb_any((struct sk_buff *)user1);
+}
+
+#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
+
+/* ipa3_tag_process() - Initiates a tag process. Incorporates the input
+ * descriptors
+ *
+ * @desc:	descriptors with commands for IC
+ * @descs_num:	number of descriptors in the above array
+ * @timeout:	how long to wait for the TAG response
+ *
+ * Note: The descriptors are copied (if there's room); the caller must
+ * still free its own descriptors afterwards
+ *
+ * Return: 0 or negative in case of failure
+ */
+int ipa3_tag_process(struct ipa3_desc desc[],
+	int descs_num,
+	unsigned long timeout)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_desc *tag_desc;
+	int desc_idx = 0;
+	struct ipahal_imm_cmd_ip_packet_init pktinit_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipahal_imm_cmd_ip_packet_tag_status status;
+	int i;
+	struct sk_buff *dummy_skb;
+	int res;
+	struct ipa3_tag_completion *comp;
+	int ep_idx;
+
+	/* Not enough room for the required descriptors for the tag process */
+	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
+		IPAERR("up to %d descriptors are allowed (received %d)\n",
+		       IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
+		       descs_num);
+		return -ENOMEM;
+	}
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
+	if (!tag_desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Copy the required descriptors from the client now */
+	if (desc) {
+		memcpy(&(tag_desc[0]), desc, descs_num *
+			sizeof(tag_desc[0]));
+		desc_idx += descs_num;
+	}
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	cmd_pyld = ipahal_construct_nop_imm_cmd(
+		false, IPAHAL_FULL_PIPELINE_CLEAR, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct NOP imm cmd\n");
+		res = -ENOMEM;
+		goto fail_free_tag_desc;
+	}
+	tag_desc[desc_idx].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	tag_desc[desc_idx].pyld = cmd_pyld->data;
+	tag_desc[desc_idx].len = cmd_pyld->len;
+	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+	tag_desc[desc_idx].user1 = cmd_pyld;
+	desc_idx++;
+
+	/* IP_PACKET_INIT IC for tag status to be sent to apps */
+	pktinit_cmd.destination_pipe_index =
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct ip_packet_init imm cmd\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	tag_desc[desc_idx].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+	tag_desc[desc_idx].pyld = cmd_pyld->data;
+	tag_desc[desc_idx].len = cmd_pyld->len;
+	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+	tag_desc[desc_idx].user1 = cmd_pyld;
+	desc_idx++;
+
+	/* status IC */
+	status.tag = IPA_COOKIE;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &status, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct ip_packet_tag_status imm cmd\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	tag_desc[desc_idx].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+	tag_desc[desc_idx].pyld = cmd_pyld->data;
+	tag_desc[desc_idx].len = cmd_pyld->len;
+	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+	tag_desc[desc_idx].user1 = cmd_pyld;
+	desc_idx++;
+
+	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+	if (!comp) {
+		IPAERR("no mem\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	init_completion(&comp->comp);
+
+	/* completion needs to be released from both here and rx handler */
+	atomic_set(&comp->cnt, 2);
+
+	/* dummy packet to send to IPA; its payload is a pointer to the completion object */
+	dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
+	if (!dummy_skb) {
+		IPAERR("failed to allocate memory\n");
+		res = -ENOMEM;
+		goto fail_free_comp;
+	}
+
+	memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
+
+	tag_desc[desc_idx].pyld = dummy_skb->data;
+	tag_desc[desc_idx].len = dummy_skb->len;
+	tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
+	tag_desc[desc_idx].callback = ipa3_tag_free_skb;
+	tag_desc[desc_idx].user1 = dummy_skb;
+	desc_idx++;
+
+	/* send all descriptors to IPA with single EOT */
+	res = ipa3_send(sys, desc_idx, tag_desc, true);
+	if (res) {
+		IPAERR("failed to send TAG packets %d\n", res);
+		res = -ENOMEM;
+		goto fail_free_comp;
+	}
+	kfree(tag_desc);
+	tag_desc = NULL;
+
+	IPADBG("waiting for TAG response\n");
+	res = wait_for_completion_timeout(&comp->comp, timeout);
+	if (res == 0) {
+		IPAERR("timeout (%lu msec) on waiting for TAG response\n",
+			timeout);
+		WARN_ON(1);
+		if (atomic_dec_return(&comp->cnt) == 0)
+			kfree(comp);
+		return -ETIME;
+	}
+
+	IPADBG("TAG response arrived!\n");
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+
+	/* sleep for short period to ensure IPA wrote all packets to BAM */
+	usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+	return 0;
+
+fail_free_comp:
+	kfree(comp);
+fail_free_desc:
+	/*
+	 * Free only the descriptors allocated here:
+	 * [nop, pkt_init, status, dummy_skb]
+	 * The caller is responsible for freeing its own allocations
+	 * in case of failure.
+	 * The min is needed because we may have failed during
+	 * the initial allocations above.
+	 */
+	for (i = descs_num;
+		i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS, desc_idx); i++)
+		if (tag_desc[i].callback)
+			tag_desc[i].callback(tag_desc[i].user1,
+				tag_desc[i].user2);
+fail_free_tag_desc:
+	kfree(tag_desc);
+	return res;
+}
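+/*
+ * Usage sketch (illustrative): callers such as ipa3_tag_aggr_force_close()
+ * below build an array of immediate-command descriptors and hand it over,
+ * e.g. res = ipa3_tag_process(desc, num_descs, timeout); whatever the
+ * result, the caller still frees its own descriptor allocations.
+ */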
+
+/**
+ * ipa3_tag_generate_force_close_desc() - generate descriptors for force close
+ *					 immediate command
+ *
+ * @desc: descriptors for IC
+ * @desc_size: desc array size
+ * @start_pipe: first pipe to close aggregation
+ * @end_pipe: last (non-inclusive) pipe to close aggregation
+ *
+ * Return: number of descriptors written or negative in case of failure
+ */
+static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
+	int desc_size, int start_pipe, int end_pipe)
+{
+	int i;
+	struct ipa_ep_cfg_aggr ep_aggr;
+	int desc_idx = 0;
+	int res;
+	struct ipahal_imm_cmd_register_write reg_write_agg_close;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_reg_valmask valmask;
+
+	for (i = start_pipe; i < end_pipe; i++) {
+		ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr);
+		if (!ep_aggr.aggr_en)
+			continue;
+		IPADBG("Force close ep: %d\n", i);
+		if (desc_idx + 1 > desc_size) {
+			IPAERR("Internal error - no descriptors\n");
+			res = -EFAULT;
+			goto fail_no_desc;
+		}
+
+		reg_write_agg_close.skip_pipeline_clear = false;
+		reg_write_agg_close.pipeline_clear_options =
+			IPAHAL_FULL_PIPELINE_CLEAR;
+		reg_write_agg_close.offset =
+			ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(1<<i, &valmask);
+		reg_write_agg_close.value = valmask.val;
+		reg_write_agg_close.value_mask = valmask.mask;
+		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_agg_close, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct register_write imm cmd\n");
+			res = -ENOMEM;
+			goto fail_alloc_reg_write_agg_close;
+		}
+
+		desc[desc_idx].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+		desc[desc_idx].pyld = cmd_pyld->data;
+		desc[desc_idx].len = cmd_pyld->len;
+		desc[desc_idx].type = IPA_IMM_CMD_DESC;
+		desc[desc_idx].callback = ipa3_tag_destroy_imm;
+		desc[desc_idx].user1 = cmd_pyld;
+		desc_idx++;
+	}
+
+	return desc_idx;
+
+fail_alloc_reg_write_agg_close:
+	for (i = 0; i < desc_idx; i++)
+		if (desc[i].callback)
+			desc[i].callback(desc[i].user1,
+				desc[i].user2);
+fail_no_desc:
+	return res;
+}
+
+/**
+ * ipa3_tag_aggr_force_close() - Force close aggregation
+ *
+ * @pipe_num: pipe number or -1 for all pipes
+ */
+int ipa3_tag_aggr_force_close(int pipe_num)
+{
+	struct ipa3_desc *desc;
+	int res = -1;
+	int start_pipe;
+	int end_pipe;
+	int num_descs;
+	int num_aggr_descs;
+
+	if (pipe_num < -1 || pipe_num >= (int)ipa3_ctx->ipa_num_pipes) {
+		IPAERR("Invalid pipe number %d\n", pipe_num);
+		return -EINVAL;
+	}
+
+	if (pipe_num == -1) {
+		start_pipe = 0;
+		end_pipe = ipa3_ctx->ipa_num_pipes;
+	} else {
+		start_pipe = pipe_num;
+		end_pipe = pipe_num + 1;
+	}
+
+	num_descs = end_pipe - start_pipe;
+
+	desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+
+	/* Force close aggregation on all valid pipes with aggregation */
+	num_aggr_descs = ipa3_tag_generate_force_close_desc(desc, num_descs,
+						start_pipe, end_pipe);
+	if (num_aggr_descs < 0) {
+		IPAERR("ipa3_tag_generate_force_close_desc failed %d\n",
+			num_aggr_descs);
+		goto fail_free_desc;
+	}
+
+	res = ipa3_tag_process(desc, num_aggr_descs,
+			      IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
+
+fail_free_desc:
+	kfree(desc);
+
+	return res;
+}
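+/*
+ * Example (illustrative): ipa3_tag_aggr_force_close(-1) closes any open
+ * aggregation frame on every pipe, while ipa3_tag_aggr_force_close(5)
+ * targets pipe 5 only; both paths funnel through ipa3_tag_process() with
+ * IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT.
+ */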
+
+/**
+ * ipa3_is_ready() - check if IPA module was initialized
+ * successfully
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa3_is_ready(void)
+{
+	bool complete;
+
+	if (ipa3_ctx == NULL)
+		return false;
+	mutex_lock(&ipa3_ctx->lock);
+	complete = ipa3_ctx->ipa_initialization_complete;
+	mutex_unlock(&ipa3_ctx->lock);
+	return complete;
+}
+
+/**
+ * ipa3_is_client_handle_valid() - check if IPA client handle is valid handle
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa3_is_client_handle_valid(u32 clnt_hdl)
+{
+	if (clnt_hdl < ipa3_ctx->ipa_num_pipes)
+		return true;
+	return false;
+}
+
+/**
+ * ipa3_proxy_clk_unvote() - called to remove IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa3_proxy_clk_unvote(void)
+{
+	if (ipa3_is_ready() && ipa3_ctx->q6_proxy_clk_vote_valid) {
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
+		ipa3_ctx->q6_proxy_clk_vote_valid = false;
+	}
+}
+
+/**
+ * ipa3_proxy_clk_vote() - called to add IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa3_proxy_clk_vote(void)
+{
+	if (ipa3_is_ready() && !ipa3_ctx->q6_proxy_clk_vote_valid) {
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
+		ipa3_ctx->q6_proxy_clk_vote_valid = true;
+	}
+}
+
+/**
+ * ipa3_get_smem_restr_bytes()- Return IPA smem restricted bytes
+ *
+ * Return value: u16 - number of IPA smem restricted bytes
+ */
+u16 ipa3_get_smem_restr_bytes(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->smem_restricted_bytes;
+
+	IPAERR("IPA Driver not initialized\n");
+
+	return 0;
+}
+
+/**
+ * ipa3_get_modem_cfg_emb_pipe_flt()- Return ipa3_ctx->modem_cfg_emb_pipe_flt
+ *
+ * Return value: true if modem configures embedded pipe flt, false otherwise
+ */
+bool ipa3_get_modem_cfg_emb_pipe_flt(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->modem_cfg_emb_pipe_flt;
+
+	IPAERR("IPA driver has not been initialized\n");
+
+	return false;
+}
+
+/**
+ * ipa3_get_transport_type()- Return ipa3_ctx->transport_prototype
+ *
+ * Return value: enum ipa_transport_type
+ */
+enum ipa_transport_type ipa3_get_transport_type(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->transport_prototype;
+
+	IPAERR("IPA driver has not been initialized\n");
+	return IPA_TRANSPORT_TYPE_GSI;
+}
+
+u32 ipa3_get_num_pipes(void)
+{
+	return ipahal_read_reg(IPA_ENABLED_PIPES);
+}
+
+/**
+ * ipa3_disable_apps_wan_cons_deaggr()-
+ * set ipa3_ctx->ipa_client_apps_wan_cons_agg_gro
+ *
+ * Return value: 0 or negative in case of failure
+ */
+int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
+{
+	int res = -1;
+	u32 limit;
+
+	/* check whether IPA HW can support the requested aggregation limits */
+	limit = ipahal_aggr_get_max_byte_limit();
+	if ((agg_size >> 10) > limit) {
+		IPAERR("IPA-AGG byte limit %d\n", limit);
+		IPAERR("exceed aggr_byte_limit\n");
+		return res;
+	}
+	limit = ipahal_aggr_get_max_pkt_limit();
+	if (agg_count > limit) {
+		IPAERR("IPA-AGG pkt limit %d\n", limit);
+		IPAERR("exceed aggr_pkt_limit\n");
+		return res;
+	}
+
+	if (ipa3_ctx) {
+		ipa3_ctx->ipa_client_apps_wan_cons_agg_gro = true;
+		return 0;
+	}
+	return res;
+}
+
+static void *ipa3_get_ipc_logbuf(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->logbuf;
+
+	return NULL;
+}
+
+static void *ipa3_get_ipc_logbuf_low(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->logbuf_low;
+
+	return NULL;
+}
+
+static void ipa3_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+	*holb = ipa3_ctx->ep[ep_idx].holb;
+}
+
+static void ipa3_set_tag_process_before_gating(bool val)
+{
+	ipa3_ctx->tag_process_before_gating = val;
+}
+
+int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+	struct ipa_api_controller *api_ctrl)
+{
+	if (ipa_hw_type < IPA_HW_v3_0) {
+		IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	api_ctrl->ipa_connect = ipa3_connect;
+	api_ctrl->ipa_disconnect = ipa3_disconnect;
+	api_ctrl->ipa_reset_endpoint = ipa3_reset_endpoint;
+	api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay;
+	api_ctrl->ipa_disable_endpoint = NULL;
+	api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
+	api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat;
+	api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr;
+	api_ctrl->ipa_cfg_ep_hdr_ext = ipa3_cfg_ep_hdr_ext;
+	api_ctrl->ipa_cfg_ep_mode = ipa3_cfg_ep_mode;
+	api_ctrl->ipa_cfg_ep_aggr = ipa3_cfg_ep_aggr;
+	api_ctrl->ipa_cfg_ep_deaggr = ipa3_cfg_ep_deaggr;
+	api_ctrl->ipa_cfg_ep_route = ipa3_cfg_ep_route;
+	api_ctrl->ipa_cfg_ep_holb = ipa3_cfg_ep_holb;
+	api_ctrl->ipa_get_holb = ipa3_get_holb;
+	api_ctrl->ipa_set_tag_process_before_gating =
+			ipa3_set_tag_process_before_gating;
+	api_ctrl->ipa_cfg_ep_cfg = ipa3_cfg_ep_cfg;
+	api_ctrl->ipa_cfg_ep_metadata_mask = ipa3_cfg_ep_metadata_mask;
+	api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client;
+	api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl;
+	api_ctrl->ipa_add_hdr = ipa3_add_hdr;
+	api_ctrl->ipa_del_hdr = ipa3_del_hdr;
+	api_ctrl->ipa_commit_hdr = ipa3_commit_hdr;
+	api_ctrl->ipa_reset_hdr = ipa3_reset_hdr;
+	api_ctrl->ipa_get_hdr = ipa3_get_hdr;
+	api_ctrl->ipa_put_hdr = ipa3_put_hdr;
+	api_ctrl->ipa_copy_hdr = ipa3_copy_hdr;
+	api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx;
+	api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx;
+	api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule;
+	api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule;
+	api_ctrl->ipa_commit_rt = ipa3_commit_rt;
+	api_ctrl->ipa_reset_rt = ipa3_reset_rt;
+	api_ctrl->ipa_get_rt_tbl = ipa3_get_rt_tbl;
+	api_ctrl->ipa_put_rt_tbl = ipa3_put_rt_tbl;
+	api_ctrl->ipa_query_rt_index = ipa3_query_rt_index;
+	api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule;
+	api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule;
+	api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule;
+	api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule;
+	api_ctrl->ipa_commit_flt = ipa3_commit_flt;
+	api_ctrl->ipa_reset_flt = ipa3_reset_flt;
+	api_ctrl->allocate_nat_device = ipa3_allocate_nat_device;
+	api_ctrl->ipa_nat_init_cmd = ipa3_nat_init_cmd;
+	api_ctrl->ipa_nat_dma_cmd = ipa3_nat_dma_cmd;
+	api_ctrl->ipa_nat_del_cmd = ipa3_nat_del_cmd;
+	api_ctrl->ipa_send_msg = ipa3_send_msg;
+	api_ctrl->ipa_register_pull_msg = ipa3_register_pull_msg;
+	api_ctrl->ipa_deregister_pull_msg = ipa3_deregister_pull_msg;
+	api_ctrl->ipa_register_intf = ipa3_register_intf;
+	api_ctrl->ipa_register_intf_ext = ipa3_register_intf_ext;
+	api_ctrl->ipa_deregister_intf = ipa3_deregister_intf;
+	api_ctrl->ipa_set_aggr_mode = ipa3_set_aggr_mode;
+	api_ctrl->ipa_set_qcncm_ndp_sig = ipa3_set_qcncm_ndp_sig;
+	api_ctrl->ipa_set_single_ndp_per_mbim = ipa3_set_single_ndp_per_mbim;
+	api_ctrl->ipa_tx_dp = ipa3_tx_dp;
+	api_ctrl->ipa_tx_dp_mul = ipa3_tx_dp_mul;
+	api_ctrl->ipa_free_skb = ipa3_free_skb;
+	api_ctrl->ipa_setup_sys_pipe = ipa3_setup_sys_pipe;
+	api_ctrl->ipa_teardown_sys_pipe = ipa3_teardown_sys_pipe;
+	api_ctrl->ipa_sys_setup = ipa3_sys_setup;
+	api_ctrl->ipa_sys_teardown = ipa3_sys_teardown;
+	api_ctrl->ipa_sys_update_gsi_hdls = ipa3_sys_update_gsi_hdls;
+	api_ctrl->ipa_connect_wdi_pipe = ipa3_connect_wdi_pipe;
+	api_ctrl->ipa_disconnect_wdi_pipe = ipa3_disconnect_wdi_pipe;
+	api_ctrl->ipa_enable_wdi_pipe = ipa3_enable_wdi_pipe;
+	api_ctrl->ipa_disable_wdi_pipe = ipa3_disable_wdi_pipe;
+	api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
+	api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
+	api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
+	api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
+	api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
+	api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
+	api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
+	api_ctrl->teth_bridge_init = ipa3_teth_bridge_init;
+	api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect;
+	api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect;
+	api_ctrl->ipa_set_client = ipa3_set_client;
+	api_ctrl->ipa_get_client = ipa3_get_client;
+	api_ctrl->ipa_get_client_uplink = ipa3_get_client_uplink;
+	api_ctrl->ipa_dma_init = ipa3_dma_init;
+	api_ctrl->ipa_dma_enable = ipa3_dma_enable;
+	api_ctrl->ipa_dma_disable = ipa3_dma_disable;
+	api_ctrl->ipa_dma_sync_memcpy = ipa3_dma_sync_memcpy;
+	api_ctrl->ipa_dma_async_memcpy = ipa3_dma_async_memcpy;
+	api_ctrl->ipa_dma_uc_memcpy = ipa3_dma_uc_memcpy;
+	api_ctrl->ipa_dma_destroy = ipa3_dma_destroy;
+	api_ctrl->ipa_mhi_init_engine = ipa3_mhi_init_engine;
+	api_ctrl->ipa_connect_mhi_pipe = ipa3_connect_mhi_pipe;
+	api_ctrl->ipa_disconnect_mhi_pipe = ipa3_disconnect_mhi_pipe;
+	api_ctrl->ipa_mhi_stop_gsi_channel = ipa3_mhi_stop_gsi_channel;
+	api_ctrl->ipa_uc_mhi_reset_channel = ipa3_uc_mhi_reset_channel;
+	api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
+			ipa3_qmi_enable_force_clear_datapath_send;
+	api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
+			ipa3_qmi_disable_force_clear_datapath_send;
+	api_ctrl->ipa_mhi_reset_channel_internal =
+			ipa3_mhi_reset_channel_internal;
+	api_ctrl->ipa_mhi_start_channel_internal =
+			ipa3_mhi_start_channel_internal;
+	api_ctrl->ipa_mhi_query_ch_info = ipa3_mhi_query_ch_info;
+	api_ctrl->ipa_mhi_resume_channels_internal =
+			ipa3_mhi_resume_channels_internal;
+	api_ctrl->ipa_has_open_aggr_frame = ipa3_has_open_aggr_frame;
+	api_ctrl->ipa_mhi_destroy_channel = ipa3_mhi_destroy_channel;
+	api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
+			ipa3_uc_mhi_send_dl_ul_sync_info;
+	api_ctrl->ipa_uc_mhi_init = ipa3_uc_mhi_init;
+	api_ctrl->ipa_uc_mhi_suspend_channel = ipa3_uc_mhi_suspend_channel;
+	api_ctrl->ipa_uc_mhi_stop_event_update_channel =
+			ipa3_uc_mhi_stop_event_update_channel;
+	api_ctrl->ipa_uc_mhi_cleanup = ipa3_uc_mhi_cleanup;
+	api_ctrl->ipa_uc_state_check = ipa3_uc_state_check;
+	api_ctrl->ipa_write_qmap_id = ipa3_write_qmap_id;
+	api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler;
+	api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler;
+	api_ctrl->ipa_restore_suspend_handler = ipa3_restore_suspend_handler;
+	api_ctrl->ipa_bam_reg_dump = ipa3_bam_reg_dump;
+	api_ctrl->ipa_get_ep_mapping = ipa3_get_ep_mapping;
+	api_ctrl->ipa_is_ready = ipa3_is_ready;
+	api_ctrl->ipa_proxy_clk_vote = ipa3_proxy_clk_vote;
+	api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote;
+	api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid;
+	api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping;
+	api_ctrl->ipa_get_rm_resource_from_ep = ipa3_get_rm_resource_from_ep;
+	api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
+		ipa3_get_modem_cfg_emb_pipe_flt;
+	api_ctrl->ipa_get_transport_type = ipa3_get_transport_type;
+	api_ctrl->ipa_ap_suspend = ipa3_ap_suspend;
+	api_ctrl->ipa_ap_resume = ipa3_ap_resume;
+	api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain;
+	api_ctrl->ipa_disable_apps_wan_cons_deaggr =
+		ipa3_disable_apps_wan_cons_deaggr;
+	api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev;
+	api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping;
+	api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
+	api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
+	api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
+	api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
+	api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
+	api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
+	api_ctrl->ipa_inc_client_enable_clks_no_block =
+		ipa3_inc_client_enable_clks_no_block;
+	api_ctrl->ipa_suspend_resource_no_block =
+		ipa3_suspend_resource_no_block;
+	api_ctrl->ipa_resume_resource = ipa3_resume_resource;
+	api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync;
+	api_ctrl->ipa_set_required_perf_profile =
+		ipa3_set_required_perf_profile;
+	api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf;
+	api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
+	api_ctrl->ipa_rx_poll = ipa3_rx_poll;
+	api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
+	api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes;
+	api_ctrl->ipa_tear_down_uc_offload_pipes =
+		ipa3_tear_down_uc_offload_pipes;
+
+	return 0;
+}
+
+/**
+ * ipa_is_modem_pipe()- Checks if pipe is owned by the modem
+ *
+ * @pipe_idx: pipe number
+ * Return value: true if owned by the modem, false otherwise
+ */
+bool ipa_is_modem_pipe(int pipe_idx)
+{
+	int client_idx;
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return false;
+	}
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (!IPA_CLIENT_IS_Q6_CONS(client_idx) &&
+			!IPA_CLIENT_IS_Q6_PROD(client_idx))
+			continue;
+		if (ipa3_get_ep_mapping(client_idx) == pipe_idx)
+			return true;
+	}
+
+	return false;
+}
+
+static void ipa3_write_rsrc_grp_type_reg(int group_index,
+			enum ipa_rsrc_grp_type_src n, bool src,
+			struct ipahal_reg_rsrc_grp_cfg *val) {
+
+	if (src) {
+		switch (group_index) {
+		case IPA_GROUP_UL:
+		case IPA_GROUP_DL:
+			ipahal_write_reg_n_fields(
+				IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+				n, val);
+			break;
+		case IPA_GROUP_DIAG:
+		case IPA_GROUP_DMA:
+			ipahal_write_reg_n_fields(
+				IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+				n, val);
+			break;
+		case IPA_GROUP_Q6ZIP:
+		case IPA_GROUP_UC_RX_Q:
+			ipahal_write_reg_n_fields(
+				IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+				n, val);
+			break;
+		default:
+			IPAERR(
+			"Invalid source resource group, index #%d\n",
+			group_index);
+			break;
+		}
+	} else {
+		switch (group_index) {
+		case IPA_GROUP_UL:
+		case IPA_GROUP_DL:
+			ipahal_write_reg_n_fields(
+				IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+				n, val);
+			break;
+		case IPA_GROUP_DIAG:
+		case IPA_GROUP_DMA:
+			ipahal_write_reg_n_fields(
+				IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+				n, val);
+			break;
+		case IPA_GROUP_Q6ZIP_GENERAL:
+		case IPA_GROUP_Q6ZIP_ENGINE:
+			ipahal_write_reg_n_fields(
+				IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+				n, val);
+			break;
+		default:
+			IPAERR(
+			"Invalid destination resource group, index #%d\n",
+			group_index);
+			break;
+		}
+	}
+}
+
+static void ipa3_configure_rx_hps_clients(int depth, bool min)
+{
+	int i;
+	struct ipahal_reg_rx_hps_clients val;
+
+	/*
+	 * depth 0 contains the first 4 clients out of 6;
+	 * depth 1 contains the last 2 clients out of 6
+	 */
+	for (i = 0 ; i < (depth ? 2 : 4) ; i++) {
+		if (min)
+			val.client_minmax[i] =
+				ipa3_rsrc_rx_grp_config
+				[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
+				[!depth ? i : 4 + i].min;
+		else
+			val.client_minmax[i] =
+				ipa3_rsrc_rx_grp_config
+				[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
+				[!depth ? i : 4 + i].max;
+	}
+	if (depth) {
+		ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 :
+					IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+					&val);
+	} else {
+		ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 :
+					IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+					&val);
+	}
+}
+
+void ipa3_set_resorce_groups_min_max_limits(void)
+{
+	int i;
+	int j;
+	struct ipahal_reg_rsrc_grp_cfg val;
+
+	IPADBG("ENTER\n");
+	IPADBG("Assign source rsrc groups min-max limits\n");
+
+	for (i = 0; i < IPA_RSRC_GRP_TYPE_SRC_MAX; i++) {
+		for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
+			val.x_min = ipa3_rsrc_src_grp_config[i][j].min;
+			val.x_max = ipa3_rsrc_src_grp_config[i][j].max;
+			val.y_min = ipa3_rsrc_src_grp_config[i][j + 1].min;
+			val.y_max = ipa3_rsrc_src_grp_config[i][j + 1].max;
+			ipa3_write_rsrc_grp_type_reg(j, i, true, &val);
+		}
+	}
+
+	IPADBG("Assign destination rsrc groups min-max limits\n");
+
+	for (i = 0; i < IPA_RSRC_GRP_TYPE_DST_MAX; i++) {
+		for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
+			val.x_min = ipa3_rsrc_dst_grp_config[i][j].min;
+			val.x_max = ipa3_rsrc_dst_grp_config[i][j].max;
+			val.y_min = ipa3_rsrc_dst_grp_config[i][j + 1].min;
+			val.y_max = ipa3_rsrc_dst_grp_config[i][j + 1].max;
+			ipa3_write_rsrc_grp_type_reg(j, i, false, &val);
+		}
+	}
+
+	/* from IPA v3.1 the resource group configuration moved from HLOS to TZ */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+		IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n");
+		return;
+	}
+
+	IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n");
+
+	ipa3_configure_rx_hps_clients(0, true);
+	ipa3_configure_rx_hps_clients(1, true);
+	ipa3_configure_rx_hps_clients(0, false);
+	ipa3_configure_rx_hps_clients(1, false);
+
+	IPADBG("EXIT\n");
+}
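+/*
+ * Note on the loops above: each rsrc-grp config register carries the
+ * limits of two groups, so group j fills the x_min/x_max fields and group
+ * j + 1 fills y_min/y_max, which is why j advances by 2 (see
+ * ipa3_write_rsrc_grp_type_reg() for the group-to-register mapping).
+ */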
+
+static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
+{
+	bool empty;
+
+	IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
+	gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
+	gsi_is_channel_empty(ep->gsi_chan_hdl, &empty);
+	if (!empty) {
+		IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
+		/* queue work to start polling if we don't already have one */
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&ep->sys->curr_polling_state)) {
+			atomic_set(&ep->sys->curr_polling_state, 1);
+			queue_work(ep->sys->wq, &ep->sys->work);
+		}
+	}
+}
+
+void ipa3_suspend_apps_pipes(bool suspend)
+{
+	struct ipa_ep_cfg_ctrl cfg;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.ipa_ep_suspend = suspend;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
+			ipa_ep_idx);
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		if (suspend)
+			ipa3_gsi_poll_after_suspend(ep);
+		else if (!atomic_read(&ep->sys->curr_polling_state))
+			gsi_config_channel_mode(ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_CALLBACK);
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+	/* The WAN_CONS pipe may not be mapped, e.g. during SSR. */
+	if (ipa_ep_idx == -1) {
+		IPADBG("Invalid client.\n");
+		return;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
+			ipa_ep_idx);
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		if (suspend)
+			ipa3_gsi_poll_after_suspend(ep);
+		else if (!atomic_read(&ep->sys->curr_polling_state))
+			gsi_config_channel_mode(ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_CALLBACK);
+	}
+}
+
+/**
+ * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
+ *
+ * Send a DMA_TASK of 1B to IPA to unblock GSI channel in STOP_IN_PROG.
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa3_inject_dma_task_for_gsi(void)
+{
+	static struct ipa_mem_buffer mem = {0};
+	struct ipahal_imm_cmd_dma_task_32b_addr cmd = {0};
+	static struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa3_desc desc = {0};
+
+	/* allocate the memory only for the very first time */
+	if (!mem.base) {
+		IPADBG("Allocate mem\n");
+		mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE;
+		mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+			mem.size,
+			&mem.phys_base,
+			GFP_KERNEL);
+		if (!mem.base) {
+			IPAERR("no mem\n");
+			return -EFAULT;
+		}
+	}
+	if (!cmd_pyld) {
+		cmd.flsh = 1;
+		cmd.size1 = mem.size;
+		cmd.addr1 = mem.phys_base;
+		cmd.packet_size = mem.size;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct dma_task_32b_addr cmd\n");
+			return -EFAULT;
+		}
+	}
+
+	desc.opcode = ipahal_imm_cmd_get_opcode_param(
+		IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1);
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	IPADBG("sending 1B packet to IPA\n");
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("ipa3_send_cmd failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
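+/*
+ * Note: the DMA buffer and command payload above are static and built only
+ * once; they are intentionally kept allocated so subsequent GSI stop
+ * retries can reuse them instead of reallocating on every injection.
+ */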
+
+/**
+ * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA
+ * @clnt_hdl: IPA client handle (pipe index)
+ *
+ * This function implements the sequence to stop a GSI channel
+ * in IPA. It returns when the channel is in STOP state.
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa3_stop_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa_mem_buffer mem;
+	int res = 0;
+	int i;
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	memset(&mem, 0, sizeof(mem));
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		res = gsi_stop_channel(ep->gsi_chan_hdl);
+		goto end_sequence;
+	}
+
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		IPADBG("Calling gsi_stop_channel\n");
+		res = gsi_stop_channel(ep->gsi_chan_hdl);
+		IPADBG("gsi_stop_channel returned %d\n", res);
+		if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
+			goto end_sequence;
+
+		IPADBG("Inject a DMA_TASK with 1B packet to IPA and retry\n");
+		/* Send a 1B DMA_TASK packet to IPA and try again */
+		res = ipa3_inject_dma_task_for_gsi();
+		if (res) {
+			IPAERR("Failed to inject DMA_TASK for GSI\n");
+			goto end_sequence;
+		}
+
+		/* sleep for short period to flush IPA */
+		usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+			IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+	}
+
+	IPAERR("Failed to stop GSI channel despite retries\n");
+	res = -EFAULT;
+end_sequence:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return res;
+}
+
+/**
+ * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ *
+ * Return value: 0 on success, negative otherwise
+ *
+ */
+int ipa3_load_fws(const struct firmware *firmware)
+{
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	const uint8_t *elf_phdr_ptr;
+	uint32_t *elf_data_ptr;
+	int phdr_idx, index;
+	uint32_t *fw_mem_base;
+
+	ehdr = (struct elf32_hdr *) firmware->data;
+
+	elf_phdr_ptr = firmware->data + sizeof(*ehdr);
+
+	for (phdr_idx = 0; phdr_idx < ehdr->e_phnum; phdr_idx++) {
+		/*
+		 * The ELF program header will contain the starting
+		 * address to which the firmware needs to be copied.
+		 */
+		phdr = (struct elf32_phdr *)elf_phdr_ptr;
+
+		/*
+		 * p_vaddr will contain the starting address to which the
+		 * FW needs to be loaded.
+		 * p_memsz will contain the size of the IRAM.
+		 * p_filesz will contain the size of the FW image.
+		 */
+		fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz);
+		if (!fw_mem_base) {
+			IPAERR("Failed to map 0x%x for the size of %u\n",
+				phdr->p_vaddr, phdr->p_memsz);
+				return -ENOMEM;
+		}
+
+		/* Set the entire region to 0s */
+		memset(fw_mem_base, 0, phdr->p_memsz);
+
+		/*
+		 * p_offset will contain an absolute offset from the beginning
+		 * of the ELF file.
+		 */
+		elf_data_ptr = (uint32_t *)
+				((uint8_t *)firmware->data + phdr->p_offset);
+
+		if (phdr->p_memsz % sizeof(uint32_t)) {
+			IPAERR("FW size %u doesn't align to 32bit\n",
+				phdr->p_memsz);
+			return -EFAULT;
+		}
+
+		/* Write the FW */
+		for (index = 0; index < phdr->p_filesz/sizeof(uint32_t);
+			index++) {
+			writel_relaxed(*elf_data_ptr, &fw_mem_base[index]);
+			elf_data_ptr++;
+		}
+
+		iounmap(fw_mem_base);
+
+		elf_phdr_ptr = elf_phdr_ptr + sizeof(*phdr);
+	}
+	IPADBG("IPA FWs (GSI FW, HPS and DPS) were loaded\n");
+	return 0;
+}
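+/*
+ * Layout assumption for the FW ELF consumed above (as reflected in the
+ * code): each program header describes one loadable image (e.g. GSI FW,
+ * HPS, DPS), where p_vaddr is the target IPA/GSI SRAM address, p_filesz
+ * the image size and p_memsz the full IRAM region zeroed before loading.
+ */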
+
+/**
+ * ipa3_is_msm_device() - Is the running device a MSM or MDM?
+ *  Determine according to IPA version
+ *
+ * Return value: true if MSM, false if MDM
+ *
+ */
+bool ipa3_is_msm_device(void)
+{
+	switch (ipa3_ctx->ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_5:
+		return false;
+	case IPA_HW_v3_1:
+	case IPA_HW_v3_5_1:
+		return true;
+	default:
+		IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type);
+		ipa_assert();
+	}
+
+	return false;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
new file mode 100644
index 0000000..b945eb06
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA3) += ipa_hal.o
+
+ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
new file mode 100644
index 0000000..c88b104
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -0,0 +1,1359 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include "ipahal.h"
+#include "ipahal_i.h"
+#include "ipahal_reg_i.h"
+#include "ipahal_fltrt_i.h"
+
+struct ipahal_context *ipahal_ctx;
+
+static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
+	__stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT),
+	__stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT),
+	__stringify(IPA_IMM_CMD_IP_V4_NAT_INIT),
+	__stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT),
+	__stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT),
+	__stringify(IPA_IMM_CMD_HDR_INIT_LOCAL),
+	__stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM),
+	__stringify(IPA_IMM_CMD_REGISTER_WRITE),
+	__stringify(IPA_IMM_CMD_NAT_DMA),
+	__stringify(IPA_IMM_CMD_IP_PACKET_INIT),
+	__stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
+	__stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
+	__stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
+};
+
+static const char *ipahal_pkt_status_exception_to_str
+	[IPAHAL_PKT_STATUS_EXCEPTION_MAX] = {
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NONE),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT),
+};
+
+#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
+		(kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
+
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_dma_task_32b_addr *data;
+	struct ipahal_imm_cmd_dma_task_32b_addr *dma_params =
+		(struct ipahal_imm_cmd_dma_task_32b_addr *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;
+
+	if (unlikely(dma_params->size1 & ~0xFFFF)) {
+		IPAHAL_ERR("Size1 is bigger than 16bit width 0x%x\n",
+			dma_params->size1);
+		WARN_ON(1);
+	}
+	if (unlikely(dma_params->packet_size & ~0xFFFF)) {
+		IPAHAL_ERR("Pkt size is bigger than 16bit width 0x%x\n",
+			dma_params->packet_size);
+		WARN_ON(1);
+	}
+	data->cmplt = dma_params->cmplt ? 1 : 0;
+	data->eof = dma_params->eof ? 1 : 0;
+	data->flsh = dma_params->flsh ? 1 : 0;
+	data->lock = dma_params->lock ? 1 : 0;
+	data->unlock = dma_params->unlock ? 1 : 0;
+	data->size1 = dma_params->size1;
+	data->addr1 = dma_params->addr1;
+	data->packet_size = dma_params->packet_size;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_packet_tag_status *data;
+	struct ipahal_imm_cmd_ip_packet_tag_status *tag_params =
+		(struct ipahal_imm_cmd_ip_packet_tag_status *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;
+
+	if (unlikely(tag_params->tag & ~0xFFFFFFFFFFFF)) {
+		IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n",
+			tag_params->tag);
+		WARN_ON(1);
+	}
+	data->tag = tag_params->tag;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_dma_shared_mem *data;
+	struct ipahal_imm_cmd_dma_shared_mem *mem_params =
+		(struct ipahal_imm_cmd_dma_shared_mem *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;
+
+	if (unlikely(mem_params->size & ~0xFFFF)) {
+		IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
+			mem_params->size);
+		WARN_ON(1);
+	}
+	if (unlikely(mem_params->local_addr & ~0xFFFF)) {
+		IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
+			mem_params->local_addr);
+		WARN_ON(1);
+	}
+	data->direction = mem_params->is_read ? 1 : 0;
+	data->size = mem_params->size;
+	data->local_addr = mem_params->local_addr;
+	data->system_addr = mem_params->system_addr;
+	data->skip_pipeline_clear = mem_params->skip_pipeline_clear ? 1 : 0;
+	switch (mem_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		data->pipeline_clear_options = 0;
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		data->pipeline_clear_options = 1;
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		data->pipeline_clear_options = 2;
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipeline clear option %d\n",
+			mem_params->pipeline_clear_options);
+		WARN_ON(1);
+	};
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_register_write *data;
+	struct ipahal_imm_cmd_register_write *regwrt_params =
+		(struct ipahal_imm_cmd_register_write *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_register_write *)pyld->data;
+
+	if (unlikely(regwrt_params->offset & ~0xFFFF)) {
+		IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
+			regwrt_params->offset);
+		WARN_ON(1);
+	}
+	data->offset = regwrt_params->offset;
+	data->value = regwrt_params->value;
+	data->value_mask = regwrt_params->value_mask;
+
+	data->skip_pipeline_clear = regwrt_params->skip_pipeline_clear ? 1 : 0;
+	switch (regwrt_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		data->pipeline_clear_options = 0;
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		data->pipeline_clear_options = 1;
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		data->pipeline_clear_options = 2;
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipeline clear option %d\n",
+			regwrt_params->pipeline_clear_options);
+		WARN_ON(1);
+	};
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_packet_init *data;
+	struct ipahal_imm_cmd_ip_packet_init *pktinit_params =
+		(struct ipahal_imm_cmd_ip_packet_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;
+
+	if (unlikely(pktinit_params->destination_pipe_index & ~0x1F)) {
+		IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n",
+			pktinit_params->destination_pipe_index);
+		WARN_ON(1);
+	}
+	data->destination_pipe_index = pktinit_params->destination_pipe_index;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_nat_dma *data;
+	struct ipahal_imm_cmd_nat_dma *nat_params =
+		(struct ipahal_imm_cmd_nat_dma *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;
+
+	data->table_index = nat_params->table_index;
+	data->base_addr = nat_params->base_addr;
+	data->offset = nat_params->offset;
+	data->data = nat_params->data;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_hdr_init_system *data;
+	struct ipahal_imm_cmd_hdr_init_system *syshdr_params =
+		(struct ipahal_imm_cmd_hdr_init_system *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;
+
+	data->hdr_table_addr = syshdr_params->hdr_table_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_hdr_init_local *data;
+	struct ipahal_imm_cmd_hdr_init_local *lclhdr_params =
+		(struct ipahal_imm_cmd_hdr_init_local *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;
+
+	if (unlikely(lclhdr_params->size_hdr_table & ~0xFFF)) {
+		IPAHAL_ERR("Hdr tble size is bigger than 12bit width 0x%x\n",
+			lclhdr_params->size_hdr_table);
+		WARN_ON(1);
+	}
+	data->hdr_table_addr = lclhdr_params->hdr_table_addr;
+	data->size_hdr_table = lclhdr_params->size_hdr_table;
+	data->hdr_addr = lclhdr_params->hdr_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v6_routing_init *data;
+	struct ipahal_imm_cmd_ip_v6_routing_init *rt6_params =
+		(struct ipahal_imm_cmd_ip_v6_routing_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;
+
+	data->hash_rules_addr = rt6_params->hash_rules_addr;
+	data->hash_rules_size = rt6_params->hash_rules_size;
+	data->hash_local_addr = rt6_params->hash_local_addr;
+	data->nhash_rules_addr = rt6_params->nhash_rules_addr;
+	data->nhash_rules_size = rt6_params->nhash_rules_size;
+	data->nhash_local_addr = rt6_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v4_routing_init *data;
+	struct ipahal_imm_cmd_ip_v4_routing_init *rt4_params =
+		(struct ipahal_imm_cmd_ip_v4_routing_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;
+
+	data->hash_rules_addr = rt4_params->hash_rules_addr;
+	data->hash_rules_size = rt4_params->hash_rules_size;
+	data->hash_local_addr = rt4_params->hash_local_addr;
+	data->nhash_rules_addr = rt4_params->nhash_rules_addr;
+	data->nhash_rules_size = rt4_params->nhash_rules_size;
+	data->nhash_local_addr = rt4_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v4_nat_init *data;
+	struct ipahal_imm_cmd_ip_v4_nat_init *nat4_params =
+		(struct ipahal_imm_cmd_ip_v4_nat_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;
+
+	data->ipv4_rules_addr = nat4_params->ipv4_rules_addr;
+	data->ipv4_expansion_rules_addr =
+		nat4_params->ipv4_expansion_rules_addr;
+	data->index_table_addr = nat4_params->index_table_addr;
+	data->index_table_expansion_addr =
+		nat4_params->index_table_expansion_addr;
+	data->table_index = nat4_params->table_index;
+	data->ipv4_rules_addr_type =
+		nat4_params->ipv4_rules_addr_shared ? 1 : 0;
+	data->ipv4_expansion_rules_addr_type =
+		nat4_params->ipv4_expansion_rules_addr_shared ? 1 : 0;
+	data->index_table_addr_type =
+		nat4_params->index_table_addr_shared ? 1 : 0;
+	data->index_table_expansion_addr_type =
+		nat4_params->index_table_expansion_addr_shared ? 1 : 0;
+	data->size_base_tables = nat4_params->size_base_tables;
+	data->size_expansion_tables = nat4_params->size_expansion_tables;
+	data->public_ip_addr = nat4_params->public_ip_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v6_filter_init *data;
+	struct ipahal_imm_cmd_ip_v6_filter_init *flt6_params =
+		(struct ipahal_imm_cmd_ip_v6_filter_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;
+
+	data->hash_rules_addr = flt6_params->hash_rules_addr;
+	data->hash_rules_size = flt6_params->hash_rules_size;
+	data->hash_local_addr = flt6_params->hash_local_addr;
+	data->nhash_rules_addr = flt6_params->nhash_rules_addr;
+	data->nhash_rules_size = flt6_params->nhash_rules_size;
+	data->nhash_local_addr = flt6_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v4_filter_init *data;
+	struct ipahal_imm_cmd_ip_v4_filter_init *flt4_params =
+		(struct ipahal_imm_cmd_ip_v4_filter_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;
+
+	data->hash_rules_addr = flt4_params->hash_rules_addr;
+	data->hash_rules_size = flt4_params->hash_rules_size;
+	data->hash_local_addr = flt4_params->hash_local_addr;
+	data->nhash_rules_addr = flt4_params->nhash_rules_addr;
+	data->nhash_rules_size = flt4_params->nhash_rules_size;
+	data->nhash_local_addr = flt4_params->nhash_local_addr;
+
+	return pyld;
+}
+
+/*
+ * struct ipahal_imm_cmd_obj - immediate command H/W information for
+ *  specific IPA version
+ * @construct - CB to construct imm command payload from abstracted structure
+ * @opcode - Immediate command OpCode
+ * @dyn_op - Does this command support a dynamic opcode?
+ *  Some commands' opcodes are dynamic, where part of the opcode is
+ *  supplied as a parameter. This flag indicates whether the specific
+ *  command supports that.
+ */
+struct ipahal_imm_cmd_obj {
+	struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
+		const void *params, bool is_atomic_ctx);
+	u16 opcode;
+	bool dyn_op;
+};
+
+/*
+ * This table contains the information regarding each immediate command
+ *  for IPAv3 and later: its opcode and its construct function.
+ * All the information for the IPAv3 commands is statically defined below.
+ * If information is missing for some command on some IPA version,
+ *  the init function will fill it in with the information from the
+ *  previous IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * If the opcode is -1, it means that the command was removed on that
+ *  specific version.
+ */
+static struct ipahal_imm_cmd_obj
+		ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_filter_init,
+		3, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
+		ipa_imm_cmd_construct_ip_v6_filter_init,
+		4, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_nat_init,
+		5, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_routing_init,
+		7, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
+		ipa_imm_cmd_construct_ip_v6_routing_init,
+		8, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
+		ipa_imm_cmd_construct_hdr_init_local,
+		9, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
+		ipa_imm_cmd_construct_hdr_init_system,
+		10, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
+		ipa_imm_cmd_construct_register_write,
+		12, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
+		ipa_imm_cmd_construct_nat_dma,
+		14, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
+		ipa_imm_cmd_construct_ip_packet_init,
+		16, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
+		ipa_imm_cmd_construct_dma_task_32b_addr,
+		17, true},
+	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
+		ipa_imm_cmd_construct_dma_shared_mem,
+		19, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
+		ipa_imm_cmd_construct_ip_packet_tag_status,
+		20, false},
+};
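+/*
+ * Illustration only (hypothetical): if a later HW version redefined the
+ * REGISTER_WRITE opcode or payload, an override entry such as
+ *	[IPA_HW_v3_5][IPA_IMM_CMD_REGISTER_WRITE] = {
+ *		ipa_imm_cmd_construct_register_write, NEW_OPCODE, false},
+ * (NEW_OPCODE being a placeholder) would be added to the table above;
+ * entries left all-zero are inherited from the previous version by
+ * ipahal_imm_cmd_init() below.
+ */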
+
+/*
+ * ipahal_imm_cmd_init() - Build the Immediate command information table
+ *  See ipahal_imm_cmd_objs[][] comments
+ */
+static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_imm_cmd_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		for (j = 0; j < IPA_IMM_CMD_MAX ; j++) {
+			if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj,
+				sizeof(struct ipahal_imm_cmd_obj))) {
+				memcpy(&ipahal_imm_cmd_objs[i+1][j],
+					&ipahal_imm_cmd_objs[i][j],
+					sizeof(struct ipahal_imm_cmd_obj));
+			} else {
+				/*
+				 * explicitly overridden immediate command.
+				 * Check validity
+				 */
+				if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with zero opcode ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_imm_cmd_objs[i+1][j].construct) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with NULL construct func ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_imm_cmd_name_str() - returns string that represent the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
+{
+	if (cmd_name < 0 || cmd_name >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
+		return "Invalid IMM_CMD";
+	}
+
+	return ipahal_imm_cmd_name_to_str[cmd_name];
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
+{
+	u32 opcode;
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+		ipahal_imm_cmd_name_str(cmd));
+	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+	if (opcode == -1) {
+		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	return opcode;
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ *  that supports dynamic opcode
+ * Some commands' opcode is not totally fixed, but part of it is
+ *  a supplied parameter. E.g. the Low-Byte is fixed and the Hi-Byte
+ *  is a given parameter.
+ * This API will return the composed opcode of the command given
+ *  the parameter
+ * Note: Use this API only for immediate commands that support Dynamic Opcode
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param)
+{
+	u32 opcode;
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command IMM_CMD=%u\n", cmd);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+		ipahal_imm_cmd_name_str(cmd));
+
+	if (!ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].dyn_op) {
+		IPAHAL_ERR("IMM_CMD=%s does not support dynamic opcode\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	/* Currently, dynamic opcode commands use a param that is set
+	 *  on the Opcode hi-byte (the lo-byte is fixed).
+	 * If this is to be changed in the future, make the opcode calculation
+	 *  a CB per command
+	 */
+	if (param & ~0xFFFF) {
+		IPAHAL_ERR("IMM_CMD=%s opcode param is invalid\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+	if (opcode == -1) {
+		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+	if (opcode & ~0xFFFF) {
+		IPAHAL_ERR("IMM_CMD=%s opcode will be overridden\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+	return (opcode + (param<<8));
+}
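+
+/*
+ * Illustrative usage sketch (not part of the original code): composing the
+ * dynamic opcode of DMA_TASK_32B_ADDR, whose opcode carries the number of
+ * buffers to process. The num_buffs variable is hypothetical.
+ *
+ *	u16 opcode;
+ *	int num_buffs = 1;
+ *
+ *	opcode = ipahal_imm_cmd_get_opcode_param(
+ *		IPA_IMM_CMD_DMA_TASK_32B_ADDR, num_buffs);
+ */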
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds an imm cmd bulk that can be sent to IPA.
+ * The command will be allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	if (!params) {
+		IPAHAL_ERR("Input error: params=%p\n", params);
+		ipa_assert();
+		return NULL;
+	}
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command %u\n", cmd);
+		ipa_assert();
+		return NULL;
+	}
+
+	IPAHAL_DBG_LOW("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));
+	return ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct(
+		cmd, params, is_atomic_ctx);
+}
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for No-Op
+ * Core driver may want functionality to inject NOP commands to IPA
+ *  to ensure e.g. pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ *  ipahal_construct_imm_cmd(). This function is a helper to the core driver
+ *  to reach this NOP functionality easily.
+ * @skip_pipline_clear: if to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+	bool skip_pipline_clear,
+	enum ipahal_pipeline_clear_option pipline_clr_opt,
+	bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_register_write cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.skip_pipeline_clear = skip_pipline_clear;
+	cmd.pipeline_clear_options = pipline_clr_opt;
+	cmd.value_mask = 0x0;
+
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&cmd, is_atomic_ctx);
+
+	if (!cmd_pyld)
+		IPAHAL_ERR("failed to construct register_write imm cmd\n");
+
+	return cmd_pyld;
+}
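+
+/*
+ * Illustrative usage sketch (not part of the original code): constructing a
+ * NO-Op command that waits for HPS clear, then releasing its payload after
+ * it was queued towards IPA.
+ *
+ *	struct ipahal_imm_cmd_pyld *nop_pyld;
+ *
+ *	nop_pyld = ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ *	if (!nop_pyld)
+ *		return -ENOMEM;
+ *	... queue nop_pyld->data (nop_pyld->len bytes) as an imm cmd ...
+ *	ipahal_destroy_imm_cmd(nop_pyld);
+ */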
+
+
+/* IPA Packet Status Logic */
+
+#define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \
+	(status->status_mask |= \
+		((hw_status->status_mask & (__hw_bit_msk) ? 1 : 0) << (__shft)))
+
+static void ipa_pkt_status_parse(
+	const void *unparsed_status, struct ipahal_pkt_status *status)
+{
+	enum ipahal_pkt_status_opcode opcode = 0;
+	enum ipahal_pkt_status_exception exception_type = 0;
+
+	struct ipa_pkt_status_hw *hw_status =
+		(struct ipa_pkt_status_hw *)unparsed_status;
+
+	status->pkt_len = hw_status->pkt_len;
+	status->endp_src_idx = hw_status->endp_src_idx;
+	status->endp_dest_idx = hw_status->endp_dest_idx;
+	status->metadata = hw_status->metadata;
+	status->flt_local = hw_status->flt_local;
+	status->flt_hash = hw_status->flt_hash;
+	status->flt_global = hw_status->flt_hash;
+	status->flt_ret_hdr = hw_status->flt_ret_hdr;
+	status->flt_miss = ~(hw_status->flt_rule_id) ? false : true;
+	status->flt_rule_id = hw_status->flt_rule_id;
+	status->rt_local = hw_status->rt_local;
+	status->rt_hash = hw_status->rt_hash;
+	status->ucp = hw_status->ucp;
+	status->rt_tbl_idx = hw_status->rt_tbl_idx;
+	status->rt_miss = ~(hw_status->rt_rule_id) ? false : true;
+	status->rt_rule_id = hw_status->rt_rule_id;
+	status->nat_hit = hw_status->nat_hit;
+	status->nat_entry_idx = hw_status->nat_entry_idx;
+	status->tag_info = hw_status->tag_info;
+	status->seq_num = hw_status->seq_num;
+	status->time_of_day_ctr = hw_status->time_of_day_ctr;
+	status->hdr_local = hw_status->hdr_local;
+	status->hdr_offset = hw_status->hdr_offset;
+	status->frag_hit = hw_status->frag_hit;
+	status->frag_rule = hw_status->frag_rule;
+
+	switch (hw_status->status_opcode) {
+	case 0x1:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET;
+		break;
+	case 0x2:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE;
+		break;
+	case 0x4:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET;
+		break;
+	case 0x8:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET;
+		break;
+	case 0x10:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_LOG;
+		break;
+	case 0x20:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_DCMP;
+		break;
+	case 0x40:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status Opcode 0x%x\n",
+			hw_status->status_opcode);
+		WARN_ON(1);
+	}
+	status->status_opcode = opcode;
+
+	switch (hw_status->nat_type) {
+	case 0:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE;
+		break;
+	case 1:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_SRC;
+		break;
+	case 2:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_DST;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status NAT type 0x%x\n",
+			hw_status->nat_type);
+		WARN_ON(1);
+	}
+
+	switch (hw_status->exception) {
+	case 0:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE;
+		break;
+	case 1:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR;
+		break;
+	case 4:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE;
+		break;
+	case 8:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH;
+		break;
+	case 16:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS;
+		break;
+	case 32:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
+		break;
+	case 64:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
+			hw_status->exception);
+		WARN_ON(1);
+	}
+	status->exception = exception_type;
+
+	IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x4, IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8, IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x10, IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x20, IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x40,
+		IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x80, IPAHAL_PKT_STATUS_MASK_V4_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x100,
+		IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x200, IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x400, IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x800,
+		IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x1000, IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x2000, IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x4000, IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8000, IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT);
+	status->status_mask &= 0xFFFF;
+}
+
+/*
+ * struct ipahal_pkt_status_obj - Packet Status H/W information for
+ *  specific IPA version
+ * @size: H/W size of the status packet
+ * @parse: CB that parses the H/W packet status into the abstracted structure
+ */
+struct ipahal_pkt_status_obj {
+	u32 size;
+	void (*parse)(const void *unparsed_status,
+		struct ipahal_pkt_status *status);
+};
+
+/*
+ * This table contains the info regarding the packet status for IPAv3 and later
+ * Information like: size of packet status and parsing function
+ * All the information on the pkt Status on IPAv3 is statically defined below.
+ * If information is missing regarding some IPA version, the init function
+ *  will fill it with the information from the previous IPA version.
+ * Information is considered missing if all of the fields are 0
+ */
+static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0] = {
+		IPA3_0_PKT_STATUS_SIZE,
+		ipa_pkt_status_parse,
+		},
+};
+
+/*
+ * ipahal_pkt_status_init() - Build the packet status information array
+ *  for the different IPA versions
+ *  See ipahal_pkt_status_objs[] comments
+ */
+static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	struct ipahal_pkt_status_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	/*
+	 * Since structure alignment is implementation dependent,
+	 * add test to avoid different and incompatible data layouts.
+	 *
+	 * In case new H/W has different size or structure of status packet,
+	 * add a compile time validity check for it like below (as well as
+	 * the new defines and/or the new structure in the internal header).
+	 */
+	BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) !=
+		IPA3_0_PKT_STATUS_SIZE);
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		if (!memcmp(&ipahal_pkt_status_objs[i+1], &zero_obj,
+			sizeof(struct ipahal_pkt_status_obj))) {
+			memcpy(&ipahal_pkt_status_objs[i+1],
+				&ipahal_pkt_status_objs[i],
+				sizeof(struct ipahal_pkt_status_obj));
+		} else {
+			/*
+			 * explicitly overridden Packet Status info
+			 * Check validity
+			 */
+			if (!ipahal_pkt_status_objs[i+1].size) {
+				IPAHAL_ERR(
+				  "Packet Status with zero size ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_pkt_status_objs[i+1].parse) {
+				IPAHAL_ERR(
+				  "Packet Status without Parse func ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void)
+{
+	return ipahal_pkt_status_objs[ipahal_ctx->hw_type].size;
+}
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+	struct ipahal_pkt_status *status)
+{
+	if (!unparsed_status || !status) {
+		IPAHAL_ERR("Input Error: unparsed_status=%p status=%p\n",
+			unparsed_status, status);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("Parse Status Packet\n");
+	memset(status, 0, sizeof(*status));
+	ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse(unparsed_status,
+		status);
+}
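+
+/*
+ * Illustrative usage sketch (not part of the original code): parsing a raw
+ * status buffer of ipahal_pkt_status_get_size() bytes read from H/W. The
+ * hw_buf pointer is hypothetical.
+ *
+ *	struct ipahal_pkt_status status;
+ *
+ *	ipahal_pkt_status_parse(hw_buf, &status);
+ *	if (status.exception != IPAHAL_PKT_STATUS_EXCEPTION_NONE)
+ *		pr_debug("exception: %s\n",
+ *			ipahal_pkt_status_exception_str(status.exception));
+ */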
+
+/*
+ * ipahal_pkt_status_exception_str() - returns string represents exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+	enum ipahal_pkt_status_exception exception)
+{
+	if (exception < 0 || exception >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) {
+		IPAHAL_ERR(
+			"requested string of invalid pkt_status exception=%d\n",
+			exception);
+		return "Invalid PKT_STATUS_EXCEPTION";
+	}
+
+	return ipahal_pkt_status_exception_to_str[exception];
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void ipahal_debugfs_init(void)
+{
+	ipahal_ctx->dent = debugfs_create_dir("ipahal", NULL);
+	if (!ipahal_ctx->dent || IS_ERR(ipahal_ctx->dent)) {
+		IPAHAL_ERR("fail to create ipahal debugfs folder\n");
+		goto fail;
+	}
+
+	return;
+fail:
+	debugfs_remove_recursive(ipahal_ctx->dent);
+	ipahal_ctx->dent = NULL;
+}
+
+static void ipahal_debugfs_remove(void)
+{
+	if (!ipahal_ctx)
+		return;
+
+	if (IS_ERR(ipahal_ctx->dent)) {
+		IPAHAL_ERR("ipahal debugfs folder was not created\n");
+		return;
+	}
+
+	debugfs_remove_recursive(ipahal_ctx->dent);
+}
+#else /* CONFIG_DEBUG_FS */
+static void ipahal_debugfs_init(void) {}
+static void ipahal_debugfs_remove(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * ipahal_cp_hdr_to_hw_buff_v3() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
+		u8 *const hdr, u32 hdr_len)
+{
+	memcpy(base + offset, hdr, hdr_len);
+}
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to
+ * base address and offset given.
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
+		void *const base, u32 offset,
+		u32 hdr_len, bool is_hdr_proc_ctx,
+		dma_addr_t phys_base, u32 hdr_base_addr,
+		struct ipa_hdr_offset_entry *offset_entry)
+{
+	if (type == IPA_HDR_PROC_NONE) {
+		struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x\n",
+			ctx->hdr_add.hdr_addr);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	} else {
+		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x\n",
+			ctx->hdr_add.hdr_addr);
+		ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->cmd.length = 0;
+		switch (type) {
+		case IPA_HDR_PROC_ETHII_TO_ETHII:
+			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;
+			break;
+		case IPA_HDR_PROC_ETHII_TO_802_3:
+			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3;
+			break;
+		case IPA_HDR_PROC_802_3_TO_ETHII:
+			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
+			break;
+		case IPA_HDR_PROC_802_3_TO_802_3:
+			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
+			break;
+		default:
+			IPAHAL_ERR("unknown ipa_hdr_proc_type %d", type);
+			WARN_ON(1);
+			return -EINVAL;
+		}
+		IPAHAL_DBG("command id %d\n", ctx->cmd.value);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_get_proc_ctx_needed_len_v3() - calculates the needed length for
+ * addition of header processing context according to the type of processing
+ * context.
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type)
+{
+	return (type == IPA_HDR_PROC_NONE) ?
+			sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) :
+			sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
+}
+
+/*
+ * struct ipahal_hdr_funcs - headers handling functions for specific IPA
+ * version
+ * @ipahal_cp_hdr_to_hw_buff - copy function for regular headers
+ */
+struct ipahal_hdr_funcs {
+	void (*ipahal_cp_hdr_to_hw_buff)(void *const base, u32 offset,
+			u8 *const hdr, u32 hdr_len);
+
+	int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
+			void *const base, u32 offset, u32 hdr_len,
+			bool is_hdr_proc_ctx, dma_addr_t phys_base,
+			u32 hdr_base_addr,
+			struct ipa_hdr_offset_entry *offset_entry);
+
+	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
+};
+
+static struct ipahal_hdr_funcs hdr_funcs;
+
+static void ipahal_hdr_init(enum ipa_hw_type ipa_hw_type)
+{
+
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	/*
+	 * once there are changes in HW and need to use different case, insert
+	 * new case for the new h/w. put the default always for the latest HW
+	 * and make sure all previous supported versions have their cases.
+	 */
+	switch (ipa_hw_type) {
+	case IPA_HW_v3_0:
+	default:
+		hdr_funcs.ipahal_cp_hdr_to_hw_buff =
+				ipahal_cp_hdr_to_hw_buff_v3;
+		hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff =
+				ipahal_cp_proc_ctx_to_hw_buff_v3;
+		hdr_funcs.ipahal_get_proc_ctx_needed_len =
+				ipahal_get_proc_ctx_needed_len_v3;
+	}
+	IPAHAL_DBG("Exit\n");
+}
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr,
+		u32 hdr_len)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+	IPAHAL_DBG("base %p, offset %d, hdr %p, hdr_len %d\n", base,
+			offset, hdr, hdr_len);
+	if (!base || !hdr_len || !hdr) {
+		IPAHAL_ERR("failed on validating params");
+		return;
+	}
+
+	hdr_funcs.ipahal_cp_hdr_to_hw_buff(base, offset, hdr, hdr_len);
+
+	IPAHAL_DBG_LOW("Exit\n");
+}
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to
+ * base address and offset given.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+		void *const base, u32 offset, u32 hdr_len,
+		bool is_hdr_proc_ctx, dma_addr_t phys_base,
+		u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry)
+{
+	IPAHAL_DBG(
+		"type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n"
+			, type, base, offset, hdr_len, is_hdr_proc_ctx,
+			hdr_base_addr, offset_entry);
+
+	if (!base ||
+		!hdr_len ||
+		(!phys_base && !hdr_base_addr) ||
+		!hdr_base_addr ||
+		((is_hdr_proc_ctx == false) && !offset_entry)) {
+		IPAHAL_ERR(
+			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
+			, hdr_len, &phys_base, hdr_base_addr
+			, is_hdr_proc_ctx, offset_entry);
+		return -EINVAL;
+	}
+
+	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
+			hdr_len, is_hdr_proc_ctx, phys_base,
+			hdr_base_addr, offset_entry);
+}
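+
+/*
+ * Illustrative usage sketch (not part of the original code): sizing a proc
+ * ctx entry and copying it into a table buffer. The tbl_base, tbl_ofst,
+ * tbl_size, hdr_len, hdr_base_addr and offset_entry variables are
+ * hypothetical caller state.
+ *
+ *	int needed = ipahal_get_proc_ctx_needed_len(IPA_HDR_PROC_NONE);
+ *
+ *	if (tbl_ofst + needed <= tbl_size)
+ *		ipahal_cp_proc_ctx_to_hw_buff(IPA_HDR_PROC_NONE, tbl_base,
+ *			tbl_ofst, hdr_len, false, 0, hdr_base_addr,
+ *			offset_entry);
+ */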
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for
+ * addition of header processing context according to the type of processing
+ * context
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type)
+{
+	int res;
+
+	IPAHAL_DBG("entry\n");
+
+	res = hdr_funcs.ipahal_get_proc_ctx_needed_len(type);
+
+	IPAHAL_DBG("Exit\n");
+
+	return res;
+}
+
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+	struct device *ipa_pdev)
+{
+	int result;
+
+	IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%p ipa_pdev=%p\n",
+		ipa_hw_type, base, ipa_pdev);
+
+	ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
+	if (!ipahal_ctx) {
+		IPAHAL_ERR("kzalloc err for ipahal_ctx\n");
+		result = -ENOMEM;
+		goto bail_err_exit;
+	}
+
+	if (ipa_hw_type < IPA_HW_v3_0) {
+		IPAHAL_ERR("ipahal supported on IPAv3 and later only\n");
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	if (ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	if (!base) {
+		IPAHAL_ERR("invalid memory io mapping addr\n");
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	if (!ipa_pdev) {
+		IPAHAL_ERR("invalid IPA platform device\n");
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	ipahal_ctx->hw_type = ipa_hw_type;
+	ipahal_ctx->base = base;
+	ipahal_ctx->ipa_pdev = ipa_pdev;
+
+	if (ipahal_reg_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal reg\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	if (ipahal_imm_cmd_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal imm cmd\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	if (ipahal_pkt_status_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal pkt status\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	ipahal_hdr_init(ipa_hw_type);
+
+	if (ipahal_fltrt_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal flt rt\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	ipahal_debugfs_init();
+
+	return 0;
+
+bail_free_ctx:
+	kfree(ipahal_ctx);
+	ipahal_ctx = NULL;
+bail_err_exit:
+	return result;
+}
+
+void ipahal_destroy(void)
+{
+	IPAHAL_DBG("Entry\n");
+	ipahal_fltrt_destroy();
+	ipahal_debugfs_remove();
+	kfree(ipahal_ctx);
+	ipahal_ctx = NULL;
+}
+
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem)
+{
+	if (likely(mem)) {
+		dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+			mem->phys_base);
+		mem->size = 0;
+		mem->base = NULL;
+		mem->phys_base = 0;
+	}
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
new file mode 100644
index 0000000..6549775
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -0,0 +1,642 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_H_
+#define _IPAHAL_H_
+
+#include <linux/msm_ipa.h>
+#include "../../ipa_common_i.h"
+
+/*
+ * Immediate command names
+ *
+ * NOTE: Any change to this enum requires a corresponding change to the
+ *	ipahal_imm_cmd_name_to_str array as well.
+ */
+enum ipahal_imm_cmd_name {
+	IPA_IMM_CMD_IP_V4_FILTER_INIT,
+	IPA_IMM_CMD_IP_V6_FILTER_INIT,
+	IPA_IMM_CMD_IP_V4_NAT_INIT,
+	IPA_IMM_CMD_IP_V4_ROUTING_INIT,
+	IPA_IMM_CMD_IP_V6_ROUTING_INIT,
+	IPA_IMM_CMD_HDR_INIT_LOCAL,
+	IPA_IMM_CMD_HDR_INIT_SYSTEM,
+	IPA_IMM_CMD_REGISTER_WRITE,
+	IPA_IMM_CMD_NAT_DMA,
+	IPA_IMM_CMD_IP_PACKET_INIT,
+	IPA_IMM_CMD_DMA_SHARED_MEM,
+	IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
+	IPA_IMM_CMD_DMA_TASK_32B_ADDR,
+	IPA_IMM_CMD_MAX,
+};
+
+/* Immediate commands abstracted structures */
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_filter_init {
+	u64 hash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u64 nhash_rules_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_filter_init {
+	u64 hash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u64 nhash_rules_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions,
+ *  location, cache address and other related parameters.
+ * @table_index: For future support of multiple NAT tables
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_rules_addr_shared: ipv4_rules_addr in shared mem (if not, then sys)
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where the expansion NAT
+ *  table starts. IPv4 NAT rules that result in NAT collision are located
+ *  in this table.
+ * @ipv4_expansion_rules_addr_shared: ipv4_expansion_rules_addr in
+ *  shared mem (if not, then sys)
+ * @index_table_addr: Addr in sys/shared mem where index table, which points
+ *  to NAT table starts
+ * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ *  table starts
+ * @index_table_expansion_addr_shared: index_table_expansion_addr in
+ *  shared mem (if not, then sys)
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ *  idx tbl (each)
+ * @public_ip_addr: public IP address
+ */
+struct ipahal_imm_cmd_ip_v4_nat_init {
+	u8 table_index;
+	u64 ipv4_rules_addr;
+	bool ipv4_rules_addr_shared;
+	u64 ipv4_expansion_rules_addr;
+	bool ipv4_expansion_rules_addr_shared;
+	u64 index_table_addr;
+	bool index_table_addr_shared;
+	u64 index_table_expansion_addr;
+	bool index_table_expansion_addr_shared;
+	u16 size_base_tables;
+	u16 size_expansion_tables;
+	u32 public_ip_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_routing_init {
+	u64 hash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u64 nhash_rules_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_routing_init {
+	u64 hash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u64 nhash_rules_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ */
+struct ipahal_imm_cmd_hdr_init_local {
+	u64 hdr_table_addr;
+	u32 size_hdr_table;
+	u32 hdr_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipahal_imm_cmd_hdr_init_system {
+	u64 hdr_table_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_nat_dma - NAT_DMA cmd payload
+ * Perform DMA operation on NAT related mem addresses. Copy data into
+ *  different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ */
+struct ipahal_imm_cmd_nat_dma {
+	u8 table_index;
+	u8 base_addr;
+	u32 offset;
+	u16 data;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload
+ * Configuration for specific IP pkt. Shall be called prior to an IP pkt
+ *  data. Pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ *  is enabled, this field will overwrite the rt rule)
+ */
+struct ipahal_imm_cmd_ip_packet_init {
+	u32 destination_pipe_index;
+};
+
+/*
+ * enum ipa_pipeline_clear_option - Values for pipeline clear waiting options
+ * @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
+ *  shall not be serviced until HPS is clear of packets or immediate commands.
+ *  The high priority Rx queue / Q6ZIP group shall still be serviced normally.
+ *
+ * @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear
+ *  (for no packet contexts allocated to the originating source group).
+ *  The source group / Rx queue shall not be serviced until all previously
+ *  allocated packet contexts are released. All other source groups/queues shall
+ *  be serviced normally.
+ *
+ * @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
+ *  All groups / Rx queues shall not be serviced until IPA pipeline is fully
+ *  clear. This should be used for debug only.
+ */
+enum ipahal_pipeline_clear_option {
+	IPAHAL_HPS_CLEAR,
+	IPAHAL_SRC_GRP_CLEAR,
+	IPAHAL_FULL_PIPELINE_CLEAR
+};
+
+/*
+ * struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload
+ * Write value to register. Allows reg changes to be synced with data packet
+ *  and other immediate commands. Can be used to access the sram
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_option: options for pipeline clear waiting
+ */
+struct ipahal_imm_cmd_register_write {
+	u32 offset;
+	u32 value;
+	u32 value_mask;
+	bool skip_pipeline_clear;
+	enum ipahal_pipeline_clear_option pipeline_clear_options;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @is_read: Read operation from local memory? If not, then write.
+ * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_option: options for pipeline clear waiting
+ * @system_addr: Address in system memory
+ */
+struct ipahal_imm_cmd_dma_shared_mem {
+	u32 size;
+	u32 local_addr;
+	bool is_read;
+	bool skip_pipeline_clear;
+	enum ipahal_pipeline_clear_option pipeline_clear_options;
+	u64 system_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ *  value that is passed back to SW inside Packet Status information.
+ *  TAG info will be provided as part of Packet Status info generated for
+ *  the next pkt transferred over the pipe.
+ *  This immediate command must be followed by a packet in the same transfer.
+ * @tag: Tag that is provided back to SW
+ */
+struct ipahal_imm_cmd_ip_packet_tag_status {
+	u64 tag;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ *  multiple descriptors.
+ *  The Opcode is dynamic, where it holds the number of buffer to process
+ * @cmplt: Complete flag: If true, IPA interrupts SW when the entire
+ *  DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: If true, IPA asserts the EOT to the
+ *  dest client. This is used for aggr sequence
+ * @flsh: Flush flag: If true pkt will go through the IPA blocks but
+ *  will not be xfered to dest client but rather will be discarded
+ * @lock: Lock pipe flag: If true, IPA will stop processing descriptors
+ *  from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: If true, IPA will stop exclusively
+ *  servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ *  only the first one needs to have this field set. It will be ignored
+ *  in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
+ *  must contain this field (2 or more buffers) or EOT.
+ */
+struct ipahal_imm_cmd_dma_task_32b_addr {
+	bool cmplt;
+	bool eof;
+	bool flsh;
+	bool lock;
+	bool unlock;
+	u32 size1;
+	u32 addr1;
+	u32 packet_size;
+};
+
+/*
+ * struct ipahal_imm_cmd_pyld - Immediate cmd payload information
+ * @len: length of the buffer
+ * @data: buffer containing the immediate command payload. The buffer goes
+ *  back to back with this structure
+ */
+struct ipahal_imm_cmd_pyld {
+	u16 len;
+	u8 data[0];
+};
+
+
+/* Immediate command Function APIs */
+
+/*
+ * ipahal_imm_cmd_name_str() - returns string that represent the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ *  that supports dynamic opcode
+ * Some commands' opcode is not totally fixed, but part of it is
+ *  a supplied parameter. E.g. the Low-Byte is fixed and the Hi-Byte
+ *  is a given parameter.
+ * This API will return the composed opcode of the command given
+ *  the parameter
+ * Note: Use this API only for immediate commands that support Dynamic Opcode
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param);
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds an imm cmd bulk that can be sent to IPA.
+ * The command will be allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx);
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for No-Op
+ * Core driver may want functionality to inject NOP commands to IPA
+ *  to ensure e.g. pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ *  ipahal_construct_imm_cmd(). This function is a helper to the core driver
+ *  to reach this NOP functionality easily.
+ * @skip_pipline_clear: if to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+	bool skip_pipline_clear,
+	enum ipahal_pipeline_clear_option pipline_clr_opt,
+	bool is_atomic_ctx);
+
+/*
+ * ipahal_destroy_imm_cmd() - Destroy/Release bulk that was built
+ *  by the construction functions
+ */
+static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld)
+{
+	kfree(pyld);
+}
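+
+/*
+ * Illustrative usage sketch (not part of the original header): the typical
+ * construct/use/destroy flow of an immediate command. The flt4 field values
+ * are placeholders.
+ *
+ *	struct ipahal_imm_cmd_ip_v4_filter_init flt4 = { 0 };
+ *	struct ipahal_imm_cmd_pyld *pyld;
+ *
+ *	flt4.hash_rules_addr = ...;
+ *	pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_V4_FILTER_INIT,
+ *		&flt4, false);
+ *	if (!pyld)
+ *		return -ENOMEM;
+ *	... send pyld->data (pyld->len bytes) to IPA as an imm cmd ...
+ *	ipahal_destroy_imm_cmd(pyld);
+ */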
+
+
+/* IPA Status packet Structures and Function APIs */
+
+/*
+ * enum ipahal_pkt_status_opcode - Packet Status Opcode
+ * @IPAHAL_STATUS_OPCODE_PACKET_2ND_PASS: Packet Status generated as part of
+ *  IPA second processing pass for a packet (i.e. IPA XLAT processing for
+ *  the translated packet).
+ */
+enum ipahal_pkt_status_opcode {
+	IPAHAL_PKT_STATUS_OPCODE_PACKET = 0,
+	IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE,
+	IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET,
+	IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET,
+	IPAHAL_PKT_STATUS_OPCODE_LOG,
+	IPAHAL_PKT_STATUS_OPCODE_DCMP,
+	IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS,
+};
+
+/*
+ * enum ipahal_pkt_status_exception - Packet Status exception type
+ * @IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH: formerly IHL exception.
+ *
+ * Note: IPTYPE, PACKET_LENGTH and PACKET_THRESHOLD exceptions mean that
+ *  partial / no IP processing took place and corresponding Status Mask
+ *  fields should be ignored. Flt and rt info is not valid.
+ *
+ * NOTE: Any change to this enum requires a corresponding change to the
+ *	ipahal_pkt_status_exception_to_str array as well.
+ */
+enum ipahal_pkt_status_exception {
+	IPAHAL_PKT_STATUS_EXCEPTION_NONE = 0,
+	IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR,
+	IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE,
+	IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH,
+	IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD,
+	IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS,
+	IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT,
+	IPAHAL_PKT_STATUS_EXCEPTION_NAT,
+	IPAHAL_PKT_STATUS_EXCEPTION_MAX,
+};
+
+/*
+ * enum ipahal_pkt_status_mask - Packet Status bitmask shift values of
+ *  the contained flags. This bitmask indicates flags on the properties of
+ *  the packet as well as IPA processing it may have had.
+ * @FRAG_PROCESS: Frag block processing flag: Was pkt processed by frag block?
+ *  Also means the frag info is valid unless exception or first frag
+ * @FILT_PROCESS: Flt block processing flag: Was pkt processed by flt block?
+ *  Also means that flt info is valid.
+ * @NAT_PROCESS: NAT block processing flag: Was pkt processed by NAT block?
+ *  Also means that NAT info is valid, unless exception.
+ * @ROUTE_PROCESS: Rt block processing flag: Was pkt processed by rt block?
+ *  Also means that rt info is valid, unless exception.
+ * @TAG_VALID: Flag specifying if TAG and TAG info valid?
+ * @FRAGMENT: Flag specifying if pkt is IP fragment.
+ * @FIRST_FRAGMENT: Flag specifying if pkt is first fragment. In this case, frag
+ *  info is invalid
+ * @V4: Flag specifying pkt is IPv4 or IPv6
+ * @CKSUM_PROCESS: CSUM block processing flag: Was pkt processed by csum block?
+ *  If so, csum trailer exists
+ * @AGGR_PROCESS: Aggr block processing flag: Was pkt processed by aggr block?
+ * @DEST_EOT: Flag specifying if EOT was asserted for the pkt on dest endp
+ * @DEAGGR_PROCESS: Deaggr block processing flag: Was pkt processed by deaggr
+ *  block?
+ * @DEAGG_FIRST: Flag specifying if this is the first pkt in deaggr frame
+ * @SRC_EOT: Flag specifying if EOT asserted by src endp when sending the buffer
+ * @PREV_EOT: Flag specifying if EOT was sent just before the pkt as part of
+ *  aggr hard-byte-limit
+ * @BYTE_LIMIT: Flag specifying if pkt is over a configured byte limit.
+ */
+enum ipahal_pkt_status_mask {
+	IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT = 0,
+	IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT,
+	IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_V4_SHFT,
+	IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT,
+	IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT,
+};
+
+/*
+ * Returns a boolean value representing a property of the packet.
+ * @__flag_shft: The shift value of the flag of the needed property in the
+ *  status bitmask. See enum ipahal_pkt_status_mask
+ * @__status: Pointer to the abstracted status structure
+ */
+#define IPAHAL_PKT_STATUS_MASK_FLAG_VAL(__flag_shft, __status) \
+	(((__status)->status_mask) & ((u32)0x1<<(__flag_shft)) ? true : false)
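+
+/*
+ * Illustrative usage sketch (not part of the original header): checking a
+ * single flag, e.g. whether the pkt is IPv4, on a parsed status. The status
+ * variable is hypothetical.
+ *
+ *	if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+ *		IPAHAL_PKT_STATUS_MASK_V4_SHFT, &status))
+ *		pr_debug("IPv4 packet\n");
+ */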
+
+/*
+ * enum ipahal_pkt_status_nat_type - Type of NAT
+ */
+enum ipahal_pkt_status_nat_type {
+	IPAHAL_PKT_STATUS_NAT_NONE,
+	IPAHAL_PKT_STATUS_NAT_SRC,
+	IPAHAL_PKT_STATUS_NAT_DST,
+};
+
+/*
+ * struct ipahal_pkt_status - IPA status packet abstracted payload.
+ *  This structure describes the status packet fields for the
+ *   following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ *   IPA_STATUS_SUSPENDED_PACKET.
+ *  Other status types have a different status packet structure.
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: The first exception that took place.
+ *  In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask for flags on several properties on the packet
+ *  and processing it may have passed in IPA. See enum ipahal_pkt_status_mask
+ * @pkt_len: Pkt pyld len including hdr and retained hdr if used. Does
+ *  not include padding or checksum trailer len.
+ * @endp_src_idx: Source end point index.
+ * @endp_dest_idx: Destination end point index.
+ *  Not valid in case of exception
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does the matching flt rule belong
+ *  to a flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
+ * @flt_global: Global filter rule flag: Does the matching flt rule belong to
+ *  the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ *  specify to retain the header?
+ * @flt_miss: Filtering miss flag: Was there a filtering rule miss?
+ *   In case of miss, all flt info to be ignored
+ * @flt_rule_id: The ID of the matching filter rule (if no miss).
+ *  This info can be combined with endp_src_idx to locate the exact rule.
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ *  a rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag
+ * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
+ * @rt_miss: Routing miss flag: Was there a routing rule miss?
+ * @rt_rule_id: The ID of the matching rt rule. (if no miss). This info
+ *  can be combined with rt_tbl_idx to locate the exact rule.
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @nat_type: Defines the type of the NAT operation:
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ *  taken from the table that resides in local memory? (If not, then system mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ */
+struct ipahal_pkt_status {
+	enum ipahal_pkt_status_opcode status_opcode;
+	enum ipahal_pkt_status_exception exception;
+	u32 status_mask;
+	u32 pkt_len;
+	u8 endp_src_idx;
+	u8 endp_dest_idx;
+	u32 metadata;
+	bool flt_local;
+	bool flt_hash;
+	bool flt_global;
+	bool flt_ret_hdr;
+	bool flt_miss;
+	u16 flt_rule_id;
+	bool rt_local;
+	bool rt_hash;
+	bool ucp;
+	u8 rt_tbl_idx;
+	bool rt_miss;
+	u16 rt_rule_id;
+	bool nat_hit;
+	u16 nat_entry_idx;
+	enum ipahal_pkt_status_nat_type nat_type;
+	u64 tag_info;
+	u8 seq_num;
+	u32 time_of_day_ctr;
+	bool hdr_local;
+	u16 hdr_offset;
+	bool frag_hit;
+	u8 frag_rule;
+};
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void);
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+	struct ipahal_pkt_status *status);
+
+/*
+ * ipahal_pkt_status_exception_str() - returns string represents exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+	enum ipahal_pkt_status_exception exception);
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len);
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to
+ * base address and offset given.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+		void *base, u32 offset, u32 hdr_len,
+		bool is_hdr_proc_ctx, dma_addr_t phys_base,
+		u32 hdr_base_addr,
+		struct ipa_hdr_offset_entry *offset_entry);
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
+ * of header processing context according to the type of processing context
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type);
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+	struct device *ipa_pdev);
+void ipahal_destroy(void);
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem);
+
+#endif /* _IPAHAL_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
new file mode 100644
index 0000000..e355d9d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -0,0 +1,3200 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipc_logging.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include "ipahal.h"
+#include "ipahal_fltrt.h"
+#include "ipahal_fltrt_i.h"
+#include "ipahal_i.h"
+#include "../../ipa_common_i.h"
+
+/*
+ * struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version
+ * @support_hash: Is hashable tables supported
+ * @tbl_width: Width of table in bytes
+ * @sysaddr_alignment: System table address alignment
+ * @lcladdr_alignment: Local table offset alignment
+ * @blk_sz_alignment: Rules block size alignment
+ * @rule_start_alignment: Rule start address alignment
+ * @tbl_hdr_width: Width of the header structure in bytes
+ * @tbl_addr_mask: Masking for Table address
+ * @rule_max_prio: Max possible priority of a rule
+ * @rule_min_prio: Min possible priority of a rule
+ * @low_rule_id: Low value of Rule ID that can be used
+ * @rule_id_bit_len: Bit length of the rule id
+ * @rule_buf_size: Max size rule may utilize.
+ * @write_val_to_hdr: Write address or offset to header entry
+ * @create_flt_bitmap: Create bitmap in H/W format using given bitmap
+ * @create_tbl_addr: Given a raw table address, create the H/W formatted one
+ * @parse_tbl_addr: Parse the given H/W address (hdr format)
+ * @rt_generate_hw_rule: Generate RT rule in H/W format
+ * @flt_generate_hw_rule: Generate FLT rule in H/W format
+ * @flt_generate_eq: Generate flt equation attributes from rule attributes
+ * @rt_parse_hw_rule: Parse rt rule read from H/W
+ * @flt_parse_hw_rule: Parse flt rule read from H/W
+ * @eq_bitfield: Array of the bit fields of the support equations
+ */
+struct ipahal_fltrt_obj {
+	bool support_hash;
+	u32 tbl_width;
+	u32 sysaddr_alignment;
+	u32 lcladdr_alignment;
+	u32 blk_sz_alignment;
+	u32 rule_start_alignment;
+	u32 tbl_hdr_width;
+	u32 tbl_addr_mask;
+	int rule_max_prio;
+	int rule_min_prio;
+	u32 low_rule_id;
+	u32 rule_id_bit_len;
+	u32 rule_buf_size;
+	u8* (*write_val_to_hdr)(u64 val, u8 *hdr);
+	u64 (*create_flt_bitmap)(u64 ep_bitmap);
+	u64 (*create_tbl_addr)(bool is_sys, u64 addr);
+	void (*parse_tbl_addr)(u64 hwaddr, u64 *addr, bool *is_sys);
+	int (*rt_generate_hw_rule)(struct ipahal_rt_rule_gen_params *params,
+		u32 *hw_len, u8 *buf);
+	int (*flt_generate_hw_rule)(struct ipahal_flt_rule_gen_params *params,
+		u32 *hw_len, u8 *buf);
+	int (*flt_generate_eq)(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+	int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule);
+	int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule);
+	u8 eq_bitfield[IPA_EQ_MAX];
+};
+
+
+static u64 ipa_fltrt_create_flt_bitmap(u64 ep_bitmap)
+{
+	/* At IPA3, global configuration is possible but not used */
+	return (ep_bitmap << 1) & ~0x1;
+}
+
+static u64 ipa_fltrt_create_tbl_addr(bool is_sys, u64 addr)
+{
+	if (is_sys) {
+		if (addr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+			IPAHAL_ERR(
+				"sys addr is not aligned accordingly addr=0x%pad\n",
+				&addr);
+			ipa_assert();
+			return 0;
+		}
+	} else {
+		if (addr & IPA3_0_HW_TBL_LCLADDR_ALIGNMENT) {
+			IPAHAL_ERR("addr/ofst isn't lcl addr aligned %llu\n",
+				addr);
+			ipa_assert();
+			return 0;
+		}
+		/*
+		 * for local tables (at sram) offsets is used as tables
+		 * addresses. offset need to be in 8B units
+		 * (local address aligned) and left shifted to its place.
+		 * Local bit need to be enabled.
+		 */
+		addr /= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+		addr *= IPA3_0_HW_TBL_ADDR_MASK + 1;
+		addr += 1;
+	}
+
+	return addr;
+}
+
+static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys)
+{
+	IPAHAL_DBG("Parsing hwaddr 0x%llx\n", hwaddr);
+
+	*is_sys = !(hwaddr & 0x1);
+	hwaddr &= (~0ULL - 1);
+	if (hwaddr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+		IPAHAL_ERR(
+			"sys addr is not aligned accordingly addr=0x%pad\n",
+			&hwaddr);
+		ipa_assert();
+		return;
+	}
+
+	if (!*is_sys) {
+		hwaddr /= IPA3_0_HW_TBL_ADDR_MASK + 1;
+		hwaddr *= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+	}
+
+	*addr = hwaddr;
+}
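+
+/*
+ * Illustrative sketch (not part of the original code): for local (sram)
+ * tables, parse undoes the packing done by create, so a properly aligned
+ * local offset round-trips through the two helpers. lcl_ofst is a
+ * hypothetical offset aligned per IPA3_0_HW_TBL_LCLADDR_ALIGNMENT.
+ *
+ *	u64 hwaddr = ipa_fltrt_create_tbl_addr(false, lcl_ofst);
+ *	u64 ofst;
+ *	bool is_sys;
+ *
+ *	ipa_fltrt_parse_tbl_addr(hwaddr, &ofst, &is_sys);
+ *	... expect ofst == lcl_ofst and is_sys == false ...
+ */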
+
+/* Update these tables if the number of equations changes */
+static const int ipa3_0_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+					IPA_OFFSET_MEQ32_1};
+static const int ipa3_0_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+					IPA_OFFSET_MEQ128_1};
+static const int ipa3_0_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+					IPA_IHL_OFFSET_RANGE16_1};
+static const int ipa3_0_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+					IPA_IHL_OFFSET_MEQ32_1};
+
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule);
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+		const struct ipa_ipfltri_rule_eq *attrib, u8 **buf);
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_rt_parse_hw_rule(u8 *addr,
+		struct ipahal_rt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule(u8 *addr,
+		struct ipahal_flt_rule_entry *rule);
+
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+	(ARRAY_SIZE(__eq_array) <= (__eq_index))
+
+#define IPA_GET_RULE_EQ_BIT_PTRN(__eq) \
+	(BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)]))
+
+/*
+ * ipa_fltrt_rule_generation_err_check() - check basic validity of the rule
+ *  attribs before starting to build it
+ *  checks that ipv4 attribs are not used on an ipv6 rule and vice-versa
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ *
+ * Return: 0 on success, -EPERM on failure
+ */
+static int ipa_fltrt_rule_generation_err_check(
+	enum ipa_ip_type ipt, const struct ipa_rule_attrib *attrib)
+{
+	if (ipt == IPA_IP_v4) {
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+		    attrib->attrib_mask & IPA_FLT_TC ||
+		    attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+			IPAHAL_ERR("v6 attribs specified for v4 rule\n");
+			return -EPERM;
+		}
+	} else if (ipt == IPA_IP_v6) {
+		if (attrib->attrib_mask & IPA_FLT_TOS ||
+		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			IPAHAL_ERR("v4 attribs specified for v6 rule\n");
+			return -EPERM;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)buf;
+
+	ipa_assert_on(params->dst_pipe_idx & ~0x1F);
+	rule_hdr->u.hdr.pipe_dest_idx = params->dst_pipe_idx;
+	switch (params->hdr_type) {
+	case IPAHAL_RT_RULE_HDR_PROC_CTX:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 1;
+		ipa_assert_on(params->hdr_ofst & 31);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5;
+		break;
+	case IPAHAL_RT_RULE_HDR_RAW:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		ipa_assert_on(params->hdr_ofst & 3);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2;
+		break;
+	case IPAHAL_RT_RULE_HDR_NONE:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		rule_hdr->u.hdr.hdr_offset = 0;
+		break;
+	default:
+		IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa3_0_rt_rule_hw_hdr);
+
+	if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, &params->rule->attrib,
+		&buf, &en_rule)) {
+		IPAHAL_ERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG("en_rule 0x%x\n", en_rule);
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
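+	/* hw_len == 0 means "report the calculated size"; otherwise verify it */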
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)buf;
+
+	switch (params->rule->action) {
+	case IPA_PASS_TO_ROUTING:
+		rule_hdr->u.hdr.action = 0x0;
+		break;
+	case IPA_PASS_TO_SRC_NAT:
+		rule_hdr->u.hdr.action = 0x1;
+		break;
+	case IPA_PASS_TO_DST_NAT:
+		rule_hdr->u.hdr.action = 0x2;
+		break;
+	case IPA_PASS_TO_EXCEPTION:
+		rule_hdr->u.hdr.action = 0x3;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+	rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	rule_hdr->u.hdr.rsvd1 = 0;
+	rule_hdr->u.hdr.rsvd2 = 0;
+	rule_hdr->u.hdr.rsvd3 = 0;
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa3_0_flt_rule_hw_hdr);
+
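+	/* Rule body comes either from pre-built equations or from attribs */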
+	if (params->rule->eq_attrib_type) {
+		if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+			&params->rule->eq_attrib, &buf)) {
+			IPAHAL_ERR("fail to generate hw rule from eq\n");
+			return -EPERM;
+		}
+		en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+	} else {
+		if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+			&params->rule->attrib, &buf, &en_rule)) {
+			IPAHAL_ERR("fail to generate hw rule\n");
+			return -EPERM;
+		}
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+		en_rule,
+		rule_hdr->u.hdr.action,
+		rule_hdr->u.hdr.rt_tbl_idx,
+		rule_hdr->u.hdr.retain_hdr);
+	IPAHAL_DBG("priority=%d, rule_id=%d\n",
+		rule_hdr->u.hdr.priority,
+		rule_hdr->u.hdr.rule_id);
+
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * This array contains the FLT/RT info for IPAv3 and later.
+ * All the information for IPAv3 is statically defined below.
+ * If information is missing for some IPA version,
+ *  the init function will fill it with the information from the previous
+ *  IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0] = {
+		true,
+		IPA3_0_HW_TBL_WIDTH,
+		IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+		IPA3_0_HW_RULE_START_ALIGNMENT,
+		IPA3_0_HW_TBL_HDR_WIDTH,
+		IPA3_0_HW_TBL_ADDR_MASK,
+		IPA3_0_RULE_MAX_PRIORITY,
+		IPA3_0_RULE_MIN_PRIORITY,
+		IPA3_0_LOW_RULE_ID,
+		IPA3_0_RULE_ID_BIT_LEN,
+		IPA3_0_HW_RULE_BUF_SIZE,
+		ipa_write_64,
+		ipa_fltrt_create_flt_bitmap,
+		ipa_fltrt_create_tbl_addr,
+		ipa_fltrt_parse_tbl_addr,
+		ipa_rt_gen_hw_rule,
+		ipa_flt_gen_hw_rule,
+		ipa_flt_generate_eq,
+		ipa_rt_parse_hw_rule,
+		ipa_flt_parse_hw_rule,
+		{
+			[IPA_TOS_EQ]			= 0,
+			[IPA_PROTOCOL_EQ]		= 1,
+			[IPA_TC_EQ]			= 2,
+			[IPA_OFFSET_MEQ128_0]		= 3,
+			[IPA_OFFSET_MEQ128_1]		= 4,
+			[IPA_OFFSET_MEQ32_0]		= 5,
+			[IPA_OFFSET_MEQ32_1]		= 6,
+			[IPA_IHL_OFFSET_MEQ32_0]	= 7,
+			[IPA_IHL_OFFSET_MEQ32_1]	= 8,
+			[IPA_METADATA_COMPARE]		= 9,
+			[IPA_IHL_OFFSET_RANGE16_0]	= 10,
+			[IPA_IHL_OFFSET_RANGE16_1]	= 11,
+			[IPA_IHL_OFFSET_EQ_32]		= 12,
+			[IPA_IHL_OFFSET_EQ_16]		= 13,
+			[IPA_FL_EQ]			= 14,
+			[IPA_IS_FRAG]			= 15,
+		},
+	},
+};
+
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	if (ipa_fltrt_rule_generation_err_check(ipt, attrib))
+		return -EPERM;
+
+	if (ipt == IPA_IP_v4) {
+		if (ipa_flt_generate_eq_ip4(ipt, attrib, eq_atrb)) {
+			IPAHAL_ERR("failed to build ipv4 flt eq rule\n");
+			return -EPERM;
+		}
+	} else if (ipt == IPA_IP_v6) {
+		if (ipa_flt_generate_eq_ip6(ipt, attrib, eq_atrb)) {
+			IPAHAL_ERR("failed to build ipv6 flt eq rule\n");
+			return -EPERM;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		return -EPERM;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		eq_atrb->rule_eq_bitmap = 0;
+		eq_atrb->rule_eq_bitmap |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_OFFSET_MEQ32_0);
+		eq_atrb->offset_meq_32[0].offset = 0;
+		eq_atrb->offset_meq_32[0].mask = 0;
+		eq_atrb->offset_meq_32[0].value = 0;
+	}
+
+	return 0;
+}
+
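+/*
+ * Write a MAC address match as a meq128 equation: the 1B header offset goes
+ * to the extra words; the 128b mask and value (MAC bytes in the upper half,
+ * written in reversed byte order) go to the rest words.
+ */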
+static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
+	u8 hdr_mac_addr_offset,
+	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN])
+{
+	int i;
+
+	*extra = ipa_write_8(hdr_mac_addr_offset, *extra);
+
+	/* LSB MASK and ADDR */
+	*rest = ipa_write_64(0, *rest);
+	*rest = ipa_write_64(0, *rest);
+
+	/* MSB MASK and ADDR */
+	*rest = ipa_write_16(0, *rest);
+	for (i = 5; i >= 0; i--)
+		*rest = ipa_write_8(mac_addr_mask[i], *rest);
+	*rest = ipa_write_16(0, *rest);
+	for (i = 5; i >= 0; i--)
+		*rest = ipa_write_8(mac_addr[i], *rest);
+}
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 **extra_wrds, u8 **rest_wrds)
+{
+	u8 *extra = *extra_wrds;
+	u8 *rest = *rest_wrds;
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	int rc = 0;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+		extra = ipa_write_8(attrib->u.v4.tos, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		extra = ipa_write_8(attrib->u.v4.protocol, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-14,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-8,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-22,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-16,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 0 => offset of TOS in v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32((attrib->tos_mask << 16), rest);
+		rest = ipa_write_32((attrib->tos_value << 16), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 12 => offset of src ip in v4 header */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_32(attrib->u.v4.src_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.src_addr, rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 16 => offset of dst ip in v4 header */
+		extra = ipa_write_8(16, extra);
+		rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.dst_addr, rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in L2 hdr */
+		extra = ipa_write_8((u8)-2, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of type after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->type, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1  => offset of code after v4 header */
+		extra = ipa_write_8(1, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->code, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of SPI after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFFFFFFFF, rest);
+		rest = ipa_write_32(attrib->spi, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+		rest = ipa_write_32(attrib->meta_data_mask, rest);
+		rest = ipa_write_32(attrib->meta_data, rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port_hi, rest);
+		rest = ipa_write_16(attrib->src_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v4 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port_hi, rest);
+		rest = ipa_write_16(attrib->dst_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port, rest);
+		rest = ipa_write_16(attrib->src_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v4 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+	goto done;
+
+err:
+	rc = -EPERM;
+done:
+	*extra_wrds = extra;
+	*rest_wrds = rest;
+	return rc;
+}
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 **extra_wrds, u8 **rest_wrds)
+{
+	u8 *extra = *extra_wrds;
+	u8 *rest = *rest_wrds;
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	int rc = 0;
+
+	/* v6 code below assumes no extension headers TODO: fix this */
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		extra = ipa_write_8(attrib->u.v6.next_hdr, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ);
+		extra = ipa_write_8(attrib->u.v6.tc, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 8 => offset of src ip in v6 header */
+		extra = ipa_write_8(8, extra);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[3], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[2], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[3], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[2], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[1], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[0], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[1], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[0], rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 24 => offset of dst ip in v6 header */
+		extra = ipa_write_8(24, extra);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[3], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[2], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[3], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[2], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[1], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[0], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[1], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[0], rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 0 => offset of TOS in v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_64(0, rest);
+		rest = ipa_write_64(0, rest);
+		rest = ipa_write_32(0, rest);
+		rest = ipa_write_32((attrib->tos_mask << 20), rest);
+		rest = ipa_write_32(0, rest);
+		rest = ipa_write_32((attrib->tos_value << 20), rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-14,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-8,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-22,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-16,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in L2 hdr */
+		extra = ipa_write_8((u8)-2, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of type after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->type, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1  => offset of code after v6 header */
+		extra = ipa_write_8(1, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->code, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of SPI after v6 header FIXME */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFFFFFFFF, rest);
+		rest = ipa_write_32(attrib->spi, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+		rest = ipa_write_32(attrib->meta_data_mask, rest);
+		rest = ipa_write_32(attrib->meta_data, rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port, rest);
+		rest = ipa_write_16(attrib->src_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v6 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port_hi, rest);
+		rest = ipa_write_16(attrib->src_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v6 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port_hi, rest);
+		rest = ipa_write_16(attrib->dst_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+		rest = ipa_write_32(attrib->u.v6.flow_label & 0xFFFFF,
+			rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+	goto done;
+
+err:
+	rc = -EPERM;
+done:
+	*extra_wrds = extra;
+	*rest_wrds = rest;
+	return rc;
+}
+
+static u8 *ipa_fltrt_copy_mem(u8 *src, u8 *dst, int cnt)
+{
+	while (cnt--)
+		*dst++ = *src++;
+
+	return dst;
+}
+
+/*
+ * ipa_fltrt_generate_hw_rule_bdy() - generate HW rule body (w/o header)
+ * @ipt: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer; advanced past the rule after it is built
+ * @en_rule: output bitmap of the equations enabled in the rule
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+	int sz;
+	int rc = 0;
+	u8 *extra_wrd_buf;
+	u8 *rest_wrd_buf;
+	u8 *extra_wrd_start;
+	u8 *rest_wrd_start;
+	u8 *extra_wrd_i;
+	u8 *rest_wrd_i;
+
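+	/*
+	 * Build the extra-word and rest-word parameter areas in temporary,
+	 * alignment-padded buffers, then copy both (64-bit padded) into *buf.
+	 */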
+	sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!extra_wrd_buf) {
+		IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+		rc = -ENOMEM;
+		goto fail_extra_alloc;
+	}
+
+	sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!rest_wrd_buf) {
+		IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+		rc = -ENOMEM;
+		goto fail_rest_alloc;
+	}
+
+	extra_wrd_start = extra_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_start = (u8 *)((long)extra_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+	rest_wrd_start = rest_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_start = (u8 *)((long)rest_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+	extra_wrd_i = extra_wrd_start;
+	rest_wrd_i = rest_wrd_start;
+
+	rc = ipa_fltrt_rule_generation_err_check(ipt, attrib);
+	if (rc) {
+		IPAHAL_ERR("rule generation err check failed\n");
+		goto fail_err_check;
+	}
+
+	if (ipt == IPA_IP_v4) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR("failed to build ipv4 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+
+	} else if (ipt == IPA_IP_v6) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR("failed to build ipv6 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		goto fail_err_check;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		IPAHAL_DBG("building default rule\n");
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]);
+		extra_wrd_i = ipa_write_8(0, extra_wrd_i);  /* offset */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* mask */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* val */
+	}
+
+	IPAHAL_DBG("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
+	IPAHAL_DBG("extra_word_2 0x%llx\n",
+		*(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH));
+
+	extra_wrd_i = ipa_pad_to_64(extra_wrd_i);
+	sz = extra_wrd_i - extra_wrd_start;
+	IPAHAL_DBG("extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz);
+
+	rest_wrd_i = ipa_pad_to_64(rest_wrd_i);
+	sz = rest_wrd_i - rest_wrd_start;
+	IPAHAL_DBG("non extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz);
+
+fail_err_check:
+	kfree(rest_wrd_buf);
+fail_rest_alloc:
+	kfree(extra_wrd_buf);
+fail_extra_alloc:
+	return rc;
+}
+
+/**
+ * ipa_fltrt_calc_extra_wrd_bytes() - Calculate the number of extra words for eq
+ * @attrib: equation attribute
+ *
+ * Return value: the number of extra-word parameter bytes needed
+ */
+static int ipa_fltrt_calc_extra_wrd_bytes(
+	const struct ipa_ipfltri_rule_eq *attrib)
+{
+	int num = 0;
+
+	if (attrib->tos_eq_present)
+		num++;
+	if (attrib->protocol_eq_present)
+		num++;
+	if (attrib->tc_eq_present)
+		num++;
+	num += attrib->num_offset_meq_128;
+	num += attrib->num_offset_meq_32;
+	num += attrib->num_ihl_offset_meq_32;
+	num += attrib->num_ihl_offset_range_16;
+	if (attrib->ihl_offset_eq_32_present)
+		num++;
+	if (attrib->ihl_offset_eq_16_present)
+		num++;
+
+	IPAHAL_DBG("extra bytes number %d\n", num);
+
+	return num;
+}
+
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+		const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
+{
+	int num_offset_meq_32 = attrib->num_offset_meq_32;
+	int num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
+	int num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
+	int num_offset_meq_128 = attrib->num_offset_meq_128;
+	int i;
+	int extra_bytes;
+	u8 *extra;
+	u8 *rest;
+
+	extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(attrib);
+	/* Only 3 equations do not have an extra word param; 13 out of 16
+	 * equations need an extra word param
+	 */
+	if (extra_bytes > 13) {
+		IPAHAL_ERR("too much extra bytes\n");
+		return -EPERM;
+	} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+		/* two extra words */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+	} else if (extra_bytes > 0) {
+		/* single extra word */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH;
+	} else {
+		/* no extra words */
+		extra = NULL;
+		rest = *buf;
+	}
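+	/* *buf layout: extra words first (if any), followed by the rest words */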
+
+	if (attrib->tos_eq_present)
+		extra = ipa_write_8(attrib->tos_eq, extra);
+
+	if (attrib->protocol_eq_present)
+		extra = ipa_write_8(attrib->protocol_eq, extra);
+
+	if (attrib->tc_eq_present)
+		extra = ipa_write_8(attrib->tc_eq, extra);
+
+	if (num_offset_meq_128) {
+		extra = ipa_write_8(attrib->offset_meq_128[0].offset, extra);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+				rest);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+				rest);
+		num_offset_meq_128--;
+	}
+
+	if (num_offset_meq_128) {
+		extra = ipa_write_8(attrib->offset_meq_128[1].offset, extra);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+				rest);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+				rest);
+		num_offset_meq_128--;
+	}
+
+	if (num_offset_meq_32) {
+		extra = ipa_write_8(attrib->offset_meq_32[0].offset, extra);
+		rest = ipa_write_32(attrib->offset_meq_32[0].mask, rest);
+		rest = ipa_write_32(attrib->offset_meq_32[0].value, rest);
+		num_offset_meq_32--;
+	}
+
+	if (num_offset_meq_32) {
+		extra = ipa_write_8(attrib->offset_meq_32[1].offset, extra);
+		rest = ipa_write_32(attrib->offset_meq_32[1].mask, rest);
+		rest = ipa_write_32(attrib->offset_meq_32[1].value, rest);
+		num_offset_meq_32--;
+	}
+
+	if (num_ihl_offset_meq_32) {
+		extra = ipa_write_8(attrib->ihl_offset_meq_32[0].offset,
+			extra);
+
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, rest);
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[0].value, rest);
+		num_ihl_offset_meq_32--;
+	}
+
+	if (num_ihl_offset_meq_32) {
+		extra = ipa_write_8(attrib->ihl_offset_meq_32[1].offset,
+			extra);
+
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, rest);
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[1].value, rest);
+		num_ihl_offset_meq_32--;
+	}
+
+	if (attrib->metadata_meq32_present) {
+		rest = ipa_write_32(attrib->metadata_meq32.mask, rest);
+		rest = ipa_write_32(attrib->metadata_meq32.value, rest);
+	}
+
+	if (num_ihl_offset_range_16) {
+		extra = ipa_write_8(attrib->ihl_offset_range_16[0].offset,
+			extra);
+
+		rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_high,
+				rest);
+		rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_low,
+				rest);
+		num_ihl_offset_range_16--;
+	}
+
+	if (num_ihl_offset_range_16) {
+		extra = ipa_write_8(attrib->ihl_offset_range_16[1].offset,
+			extra);
+
+		rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_high,
+				rest);
+		rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_low,
+				rest);
+		num_ihl_offset_range_16--;
+	}
+
+	if (attrib->ihl_offset_eq_32_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_32.offset, extra);
+		rest = ipa_write_32(attrib->ihl_offset_eq_32.value, rest);
+	}
+
+	if (attrib->ihl_offset_eq_16_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_16.offset, extra);
+		rest = ipa_write_16(attrib->ihl_offset_eq_16.value, rest);
+		rest = ipa_write_16(0, rest);
+	}
+
+	if (attrib->fl_eq_present)
+		rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest);
+
+	extra = ipa_pad_to_64(extra);
+	rest = ipa_pad_to_64(rest);
+	*buf = rest;
+
+	return 0;
+}
+
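+/*
+ * Fill a meq128 equation with a MAC address match: zero the low 8B of mask
+ * and value and mirror the 6 MAC bytes into the upper bytes (15..10).
+ */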
+static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
+	u8 hdr_mac_addr_offset,	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
+{
+	int i;
+
+	eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
+
+	/* LSB MASK and ADDR */
+	memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 8);
+	memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 8);
+
+	/* MSB MASK and ADDR */
+	memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 2);
+	for (i = 0; i <= 5; i++)
+		eq_atrb->offset_meq_128[ofst_meq128].mask[15 - i] =
+			mac_addr_mask[i];
+
+	memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 2);
+	for (i = 0; i <= 5; i++)
+		eq_atrb->offset_meq_128[ofst_meq128].value[15 - i] =
+			mac_addr[i];
+}
+
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	u16 *en_rule = &eq_bitmap;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+		eq_atrb->tos_eq_present = 1;
+		eq_atrb->tos_eq = attrib->u.v4.tos;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		eq_atrb->protocol_eq_present = 1;
+		eq_atrb->protocol_eq = attrib->u.v4.protocol;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->tos_mask << 16;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->tos_value << 16;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.src_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.src_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.dst_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.dst_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			htons(attrib->ether_type);
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			htons(attrib->ether_type);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->type;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->code;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xFFFFFFFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->spi;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_METADATA_COMPARE);
+		eq_atrb->metadata_meq32_present = 1;
+		eq_atrb->metadata_meq32.offset = 0;
+		eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+		eq_atrb->metadata_meq32.value = attrib->meta_data;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+		eq_atrb->ipv4_frag_eq_present = 1;
+	}
+
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
+
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	u16 *en_rule = &eq_bitmap;
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_PROTOCOL_EQ);
+		eq_atrb->protocol_eq_present = 1;
+		eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_TC_EQ);
+		eq_atrb->tc_eq_present = 1;
+		eq_atrb->tc_eq = attrib->u.v6.tc;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* use the same word order as in ipa v2 */
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+			= attrib->u.v6.src_addr_mask[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+			= attrib->u.v6.src_addr_mask[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+			= attrib->u.v6.src_addr_mask[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->u.v6.src_addr_mask[3];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+			= attrib->u.v6.src_addr[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+			= attrib->u.v6.src_addr[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+			= attrib->u.v6.src_addr[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->u.v6.src_addr[3];
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
+		/* use the same word order as in ipa v2 */
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+			= attrib->u.v6.dst_addr_mask[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+			= attrib->u.v6.dst_addr_mask[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+			= attrib->u.v6.dst_addr_mask[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->u.v6.dst_addr_mask[3];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+			= attrib->u.v6.dst_addr[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+			= attrib->u.v6.dst_addr[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+			= attrib->u.v6.dst_addr[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->u.v6.dst_addr[3];
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
+		memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 12);
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->tos_mask << 20;
+		memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 12);
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->tos_value << 20;
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			htons(attrib->ether_type);
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			htons(attrib->ether_type);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->type;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->code;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xFFFFFFFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->spi;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_METADATA_COMPARE);
+		eq_atrb->metadata_meq32_present = 1;
+		eq_atrb->metadata_meq32.offset = 0;
+		eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+		eq_atrb->metadata_meq32.value = attrib->meta_data;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+		eq_atrb->fl_eq_present = 1;
+		eq_atrb->fl_eq = attrib->u.v6.flow_label;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_IS_FRAG);
+		eq_atrb->ipv4_frag_eq_present = 1;
+	}
+
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
+
+static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz,
+	struct ipa_ipfltri_rule_eq *atrb, u32 *rule_size)
+{
+	u16 eq_bitmap;
+	int extra_bytes;
+	u8 *extra;
+	u8 *rest;
+	int i;
+	u8 dummy_extra_wrd;
+
+	if (!addr || !atrb || !rule_size) {
+		IPAHAL_ERR("Input error: addr=%p atrb=%p rule_size=%p\n",
+			addr, atrb, rule_size);
+		return -EINVAL;
+	}
+
+	eq_bitmap = atrb->rule_eq_bitmap;
+
+	IPAHAL_DBG("eq_bitmap=0x%x\n", eq_bitmap);
+
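+	/*
+	 * The eq bitmap tells which equations are present. Derive the
+	 * presence flags and counters first, then read the parameters from
+	 * the extra and rest word areas that follow the rule header.
+	 */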
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ))
+		atrb->tos_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ))
+		atrb->protocol_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ))
+		atrb->tc_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_0))
+		atrb->num_offset_meq_128++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_1))
+		atrb->num_offset_meq_128++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_0))
+		atrb->num_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_1))
+		atrb->num_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_0))
+		atrb->num_ihl_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_1))
+		atrb->num_ihl_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE))
+		atrb->metadata_meq32_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_0))
+		atrb->num_ihl_offset_range_16++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_1))
+		atrb->num_ihl_offset_range_16++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_32))
+		atrb->ihl_offset_eq_32_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_16))
+		atrb->ihl_offset_eq_16_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ))
+		atrb->fl_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG))
+		atrb->ipv4_frag_eq_present = true;
+
+	extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(atrb);
+	/* Only 3 of the 16 equations have no extra word param; the
+	 * remaining 13 do, so extra data can be at most 13 bytes
+	 */
+	if (extra_bytes > 13) {
+		IPAHAL_ERR("too many extra bytes\n");
+		return -EPERM;
+	} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+		/* two extra words */
+		extra = addr + hdr_sz;
+		rest = extra + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+	} else if (extra_bytes > 0) {
+		/* single extra word */
+		extra = addr + hdr_sz;
+		rest = extra + IPA3_0_HW_TBL_HDR_WIDTH;
+	} else {
+		/* no extra words */
+		dummy_extra_wrd = 0;
+		extra = &dummy_extra_wrd;
+		rest = addr + hdr_sz;
+	}
+	IPAHAL_DBG("addr=0x%p extra=0x%p rest=0x%p\n", addr, extra, rest);
+
+	if (atrb->tos_eq_present)
+		atrb->tos_eq = *extra++;
+	if (atrb->protocol_eq_present)
+		atrb->protocol_eq = *extra++;
+	if (atrb->tc_eq_present)
+		atrb->tc_eq = *extra++;
+
+	if (atrb->num_offset_meq_128 > 0) {
+		atrb->offset_meq_128[0].offset = *extra++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[0].mask[i] = *rest++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[0].value[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[0].mask[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[0].value[i] = *rest++;
+	}
+	if (atrb->num_offset_meq_128 > 1) {
+		atrb->offset_meq_128[1].offset = *extra++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[1].mask[i] = *rest++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[1].value[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[1].mask[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[1].value[i] = *rest++;
+	}
+
+	if (atrb->num_offset_meq_32 > 0) {
+		atrb->offset_meq_32[0].offset = *extra++;
+		atrb->offset_meq_32[0].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->offset_meq_32[0].value = *((u32 *)rest);
+		rest += 4;
+	}
+	if (atrb->num_offset_meq_32 > 1) {
+		atrb->offset_meq_32[1].offset = *extra++;
+		atrb->offset_meq_32[1].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->offset_meq_32[1].value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->num_ihl_offset_meq_32 > 0) {
+		atrb->ihl_offset_meq_32[0].offset = *extra++;
+		atrb->ihl_offset_meq_32[0].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->ihl_offset_meq_32[0].value = *((u32 *)rest);
+		rest += 4;
+	}
+	if (atrb->num_ihl_offset_meq_32 > 1) {
+		atrb->ihl_offset_meq_32[1].offset = *extra++;
+		atrb->ihl_offset_meq_32[1].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->ihl_offset_meq_32[1].value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->metadata_meq32_present) {
+		atrb->metadata_meq32.mask = *((u32 *)rest);
+		rest += 4;
+		atrb->metadata_meq32.value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->num_ihl_offset_range_16 > 0) {
+		atrb->ihl_offset_range_16[0].offset = *extra++;
+		atrb->ihl_offset_range_16[0].range_high = *((u16 *)rest);
+		rest += 2;
+		atrb->ihl_offset_range_16[0].range_low = *((u16 *)rest);
+		rest += 2;
+	}
+	if (atrb->num_ihl_offset_range_16 > 1) {
+		atrb->ihl_offset_range_16[1].offset = *extra++;
+		atrb->ihl_offset_range_16[1].range_high = *((u16 *)rest);
+		rest += 2;
+		atrb->ihl_offset_range_16[1].range_low = *((u16 *)rest);
+		rest += 2;
+	}
+
+	if (atrb->ihl_offset_eq_32_present) {
+		atrb->ihl_offset_eq_32.offset = *extra++;
+		atrb->ihl_offset_eq_32.value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->ihl_offset_eq_16_present) {
+		atrb->ihl_offset_eq_16.offset = *extra++;
+		atrb->ihl_offset_eq_16.value = *((u16 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->fl_eq_present) {
+		atrb->fl_eq = *((u32 *)rest);
+		atrb->fl_eq &= 0xfffff;
+		rest += 4;
+	}
+
+	IPAHAL_DBG("before rule alignment rest=0x%p\n", rest);
+	rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+	IPAHAL_DBG("after rule alignment  rest=0x%p\n", rest);
+
+	*rule_size = rest - addr;
+	IPAHAL_DBG("rule_size=0x%x\n", *rule_size);
+
+	return 0;
+}
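+
+/*
+ * Sketch of the rule layout that ipa_fltrt_parse_hw_rule_eq() assumes
+ * (byte sizes follow the IPA3.0 constants in ipahal_fltrt_i.h):
+ *
+ *	addr:           rule header (hdr_sz bytes)
+ *	addr + hdr_sz:  extra words - 0, 8 or 16 bytes, one byte consumed
+ *	                per equation that carries an extra parameter
+ *	after extra:    remaining equation arguments ("rest")
+ *
+ * The resulting rule size is rounded up to an 8-byte boundary using the
+ * IPA3_0_HW_RULE_START_ALIGNMENT mask (0x7).
+ */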
+
+static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule)
+{
+	struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG("Entry\n");
+
+	rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr;
+	atrb = &rule->eq_attrib;
+
+	IPAHAL_DBG("read hdr 0x%llx\n", rule_hdr->u.word);
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	rule->dst_pipe_idx = rule_hdr->u.hdr.pipe_dest_idx;
+	if (rule_hdr->u.hdr.proc_ctx) {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 5;
+	} else {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 2;
+	}
+	rule->hdr_lcl = !rule_hdr->u.hdr.system;
+
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->id = rule_hdr->u.hdr.rule_id;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
+static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule)
+{
+	struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG("Entry\n");
+
+	rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr;
+	atrb = &rule->rule.eq_attrib;
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	switch (rule_hdr->u.hdr.action) {
+	case 0x0:
+		rule->rule.action = IPA_PASS_TO_ROUTING;
+		break;
+	case 0x1:
+		rule->rule.action = IPA_PASS_TO_SRC_NAT;
+		break;
+	case 0x2:
+		rule->rule.action = IPA_PASS_TO_DST_NAT;
+		break;
+	case 0x3:
+		rule->rule.action = IPA_PASS_TO_EXCEPTION;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
+		WARN_ON(1);
+		rule->rule.action = rule_hdr->u.hdr.action;
+	}
+
+	rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx;
+	rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->id = rule_hdr->u.hdr.rule_id;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	rule->rule.eq_attrib_type = 1;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
+/*
+ * ipahal_fltrt_init() - Build the FLT/RT information table
+ *  See ipahal_fltrt_objs[] comments
+ *
+ * Note: As global variables are initialized with zero, any un-overridden
+ *  register entry will be zero. By this we recognize them.
+ */
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type)
+{
+	struct ipahal_fltrt_obj zero_obj;
+	int i;
+	struct ipa_mem_buffer *mem;
+	int rc = -EFAULT;
+
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if (ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("Invalid H/W type\n");
+		return -EFAULT;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0; i < ipa_hw_type; i++) {
+		if (!memcmp(&ipahal_fltrt_objs[i+1], &zero_obj,
+			sizeof(struct ipahal_fltrt_obj))) {
+			memcpy(&ipahal_fltrt_objs[i+1],
+				&ipahal_fltrt_objs[i],
+				sizeof(struct ipahal_fltrt_obj));
+		} else {
+			/*
+			 * explicitly overridden FLT RT info
+			 * Check validity
+			 */
+			if (!ipahal_fltrt_objs[i+1].tbl_width) {
+				IPAHAL_ERR(
+				 "Zero tbl width ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].sysaddr_alignment) {
+				IPAHAL_ERR(
+				  "No tbl sysaddr alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].lcladdr_alignment) {
+				IPAHAL_ERR(
+				  "No tbl lcladdr alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].blk_sz_alignment) {
+				IPAHAL_ERR(
+				  "No blk sz alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rule_start_alignment) {
+				IPAHAL_ERR(
+				  "No rule start alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].tbl_hdr_width) {
+				IPAHAL_ERR(
+				 "Zero tbl hdr width ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].tbl_addr_mask) {
+				IPAHAL_ERR(
+				 "Zero tbl addr mask ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (ipahal_fltrt_objs[i+1].rule_id_bit_len < 2) {
+				IPAHAL_ERR(
+				 "Too few bits for rule_id ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rule_buf_size) {
+				IPAHAL_ERR(
+				 "zero rule buf size ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].write_val_to_hdr) {
+				IPAHAL_ERR(
+				  "No write_val_to_hdr CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].create_flt_bitmap) {
+				IPAHAL_ERR(
+				  "No create_flt_bitmap CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].create_tbl_addr) {
+				IPAHAL_ERR(
+				  "No create_tbl_addr CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].parse_tbl_addr) {
+				IPAHAL_ERR(
+				  "No parse_tbl_addr CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rt_generate_hw_rule) {
+				IPAHAL_ERR(
+				  "No rt_generate_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].flt_generate_hw_rule) {
+				IPAHAL_ERR(
+				  "No flt_generate_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].flt_generate_eq) {
+				IPAHAL_ERR(
+				  "No flt_generate_eq CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rt_parse_hw_rule) {
+				IPAHAL_ERR(
+				  "No rt_parse_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].flt_parse_hw_rule) {
+				IPAHAL_ERR(
+				  "No flt_parse_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+		}
+	}
+
+	mem = &ipahal_ctx->empty_fltrt_tbl;
+
+	/* Set up an empty table in system memory; this will
+	 * be used, for example, to delete a rt tbl safely
+	 */
+	mem->size = ipahal_fltrt_objs[ipa_hw_type].tbl_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("DMA buff alloc fail %d bytes for empty tbl\n",
+			mem->size);
+		return -ENOMEM;
+	}
+
+	if (mem->phys_base &
+		ipahal_fltrt_objs[ipa_hw_type].sysaddr_alignment) {
+		IPAHAL_ERR("Empty table buf is not address aligned 0x%pad\n",
+			&mem->phys_base);
+		rc = -EFAULT;
+		goto clear_empty_tbl;
+	}
+
+	memset(mem->base, 0, mem->size);
+	IPAHAL_DBG("empty table allocated in system memory\n");
+
+	return 0;
+
+clear_empty_tbl:
+	dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+		mem->phys_base);
+	return rc;
+}
+
+void ipahal_fltrt_destroy(void)
+{
+	IPAHAL_DBG("Entry\n");
+
+	if (ipahal_ctx && ipahal_ctx->empty_fltrt_tbl.base)
+		dma_free_coherent(ipahal_ctx->ipa_pdev,
+			ipahal_ctx->empty_fltrt_tbl.size,
+			ipahal_ctx->empty_fltrt_tbl.base,
+			ipahal_ctx->empty_fltrt_tbl.phys_base);
+}
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].tbl_hdr_width;
+}
+
+/* Get the H/W local table (SRAM) address alignment
 + * Table headers reference local tables via offsets in SRAM.
+ * This function returns the alignment of the offset that IPA expects
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].lcladdr_alignment;
+}
+
+/*
+ * Rule priority is used to distinguish the order of rules in the
+ * integrated table consisting of hashable and non-hashable tables.
+ * A max-priority rule is used as soon as IPA scans it; IPA will not
+ * look for further matching rules.
+ */
+int ipahal_get_rule_max_priority(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_max_prio;
+}
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!prio) {
+		IPAHAL_ERR("Invalid Input\n");
+		return -EINVAL;
+	}
+
+	/* Priority logic is reversed. Priority 0 is considered max priority */
+	if (*prio > obj->rule_min_prio || *prio < obj->rule_max_prio) {
+		IPAHAL_ERR("Invalid given priority %d\n", *prio);
+		return -EINVAL;
+	}
+
+	*prio += 1;
+
+	if (*prio > obj->rule_min_prio) {
+		IPAHAL_ERR("Cannot decrease priority. Already on min\n");
+		*prio -= 1;
+		return -EFAULT;
+	}
+
+	return 0;
+}
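+
+/*
+ * Example of the reversed priority semantics (assuming the IPA3.0 range
+ * from ipahal_fltrt_i.h: max priority 0, min priority 1023):
+ *
+ *	int prio = ipahal_get_rule_max_priority();	// prio == 0
+ *
+ *	ipahal_rule_decrease_priority(&prio);		// prio == 1
+ *	// ...
+ *	// once prio reaches 1023 (min), a further call returns -EFAULT
+ *	// and leaves prio unchanged
+ */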
+
+/* Does the given ID represent a rule miss?
+ * The rule miss ID is always the max ID possible in the bit-pattern
+ */
+bool ipahal_is_rule_miss_id(u32 id)
+{
+	return (id ==
+		((1U << ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len)
+		-1));
+}
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void)
+{
+	return BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len - 1);
+}
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].low_rule_id;
+}
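+
+/*
+ * Example of the rule-ID helpers above, assuming the IPA3.0 values from
+ * ipahal_fltrt_i.h (rule_id_bit_len = 10, low_rule_id = 1):
+ *
+ *	ipahal_is_rule_miss_id(0x3FF);	// true - all 10 ID bits set
+ *	ipahal_is_rule_miss_id(0x200);	// false
+ *	ipahal_get_rule_id_hi_bit();	// 0x200, i.e. BIT(10 - 1)
+ *	ipahal_get_low_rule_id();	// 1
+ */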
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ *  Creates routing header buffer for the given tables number.
+ *  For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, struct ipa_mem_buffer *mem)
+{
+	int i;
+	u64 addr;
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!tbls_num || !nhash_hdr_size || !mem) {
+		IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+			tbls_num, nhash_hdr_size, mem);
+		return -EINVAL;
+	}
+	if (obj->support_hash && !hash_hdr_size) {
+		IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+		return -EINVAL;
+	}
+
+	if (nhash_hdr_size < (tbls_num * obj->tbl_hdr_width)) {
+		IPAHAL_ERR("Not enough space at non-hash hdr blk for all tbls\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	if (obj->support_hash &&
+		(hash_hdr_size < (tbls_num * obj->tbl_hdr_width))) {
+		IPAHAL_ERR("Not enough space at hash hdr blk for all tbls\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mem->size = tbls_num * obj->tbl_hdr_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+	for (i = 0; i < tbls_num; i++)
+		obj->write_val_to_hdr(addr,
+			mem->base + i * obj->tbl_hdr_width);
+
+	return 0;
+}
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ *  Creates filter header buffer for the given tables number.
+ *  For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ *  should be: bit0->EP0, bit1->EP1
+ *  If bitmap is zero -> create tbl without bitmap entry
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem)
+{
+	int flt_spc;
+	u64 flt_bitmap;
+	int i;
+	u64 addr;
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap);
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!tbls_num || !nhash_hdr_size || !mem) {
+		IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+			tbls_num, nhash_hdr_size, mem);
+		return -EINVAL;
+	}
+	if (obj->support_hash && !hash_hdr_size) {
+		IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+		return -EINVAL;
+	}
+
+	if (obj->support_hash) {
+		flt_spc = hash_hdr_size;
+		/* bitmap word */
+		if (ep_bitmap)
+			flt_spc -= obj->tbl_hdr_width;
+		flt_spc /= obj->tbl_hdr_width;
+		if (tbls_num > flt_spc)  {
+			IPAHAL_ERR("space for hash flt hdr is too small\n");
+			WARN_ON(1);
+			return -EPERM;
+		}
+	}
+
+	flt_spc = nhash_hdr_size;
+	/* bitmap word */
+	if (ep_bitmap)
+		flt_spc -= obj->tbl_hdr_width;
+	flt_spc /= obj->tbl_hdr_width;
+	if (tbls_num > flt_spc)  {
+		IPAHAL_ERR("space for non-hash flt hdr is too small\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	mem->size = tbls_num * obj->tbl_hdr_width;
+	if (ep_bitmap)
+		mem->size += obj->tbl_hdr_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	if (ep_bitmap) {
+		flt_bitmap = obj->create_flt_bitmap(ep_bitmap);
+		IPAHAL_DBG("flt bitmap 0x%llx\n", flt_bitmap);
+		obj->write_val_to_hdr(flt_bitmap, mem->base);
+	}
+
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+
+	if (ep_bitmap) {
+		for (i = 1; i <= tbls_num; i++)
+			obj->write_val_to_hdr(addr,
+				mem->base + i * obj->tbl_hdr_width);
+	} else {
+		for (i = 0; i < tbls_num; i++)
+			obj->write_val_to_hdr(addr,
+				mem->base + i * obj->tbl_hdr_width);
+	}
+
+	return 0;
+}
+
+/*
+ * ipa_fltrt_alloc_init_tbl_hdr() - allocate and initialize buffers for
+ *  flt/rt tables headers to be filled into sram. Init each table to point
+ *  to empty system table
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_init_tbl_hdr(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	u64 addr;
+	int i;
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!params) {
+		IPAHAL_ERR("Input error: params=%p\n", params);
+		return -EINVAL;
+	}
+
+	params->nhash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+	params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+		params->nhash_hdr.size,
+		&params->nhash_hdr.phys_base, GFP_KERNEL);
+	if (!params->nhash_hdr.base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+			params->nhash_hdr.size);
+		goto nhash_alloc_fail;
+	}
+
+	if (obj->support_hash) {
+		params->hash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+		params->hash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+			params->hash_hdr.size, &params->hash_hdr.phys_base,
+			GFP_KERNEL);
+		if (!params->hash_hdr.base) {
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->hash_hdr.size);
+			goto hash_alloc_fail;
+		}
+	}
+
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+	for (i = 0; i < params->tbls_num; i++) {
+		obj->write_val_to_hdr(addr,
+			params->nhash_hdr.base + i * obj->tbl_hdr_width);
+		if (obj->support_hash)
+			obj->write_val_to_hdr(addr,
+				params->hash_hdr.base +
+				i * obj->tbl_hdr_width);
+	}
+
+	return 0;
+
+hash_alloc_fail:
+	ipahal_free_dma_mem(&params->nhash_hdr);
+nhash_alloc_fail:
+	return -ENOMEM;
+}
+
+/*
+ * ipa_fltrt_alloc_lcl_bdy() - allocate and initialize buffers for
+ *  local flt/rt tables bodies to be filled into sram
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_lcl_bdy(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	/* The HAL allocates larger sizes than the given effective ones
+	 * for alignments and border indications
+	 */
+	IPAHAL_DBG("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
+		params->total_sz_lcl_hash_tbls,
+		params->total_sz_lcl_nhash_tbls);
+
+	IPAHAL_DBG("lcl tbl bdy count: hash=%u nhash=%u\n",
+		params->num_lcl_hash_tbls,
+		params->num_lcl_nhash_tbls);
+
+	/* Align the sizes to cope with the termination word
+	 *  and H/W local table start offset alignment
+	 */
+	if (params->nhash_bdy.size) {
+		params->nhash_bdy.size = params->total_sz_lcl_nhash_tbls;
+		/* for table terminator */
+		params->nhash_bdy.size += obj->tbl_width *
+			params->num_lcl_nhash_tbls;
+		/* align the start of local rule-set */
+		params->nhash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_nhash_tbls;
+		/* SRAM block size alignment */
+		params->nhash_bdy.size += obj->blk_sz_alignment;
+		params->nhash_bdy.size &= ~(obj->blk_sz_alignment);
+
+		IPAHAL_DBG("nhash lcl tbl bdy total h/w size = %u\n",
+			params->nhash_bdy.size);
+
+		params->nhash_bdy.base = dma_alloc_coherent(
+			ipahal_ctx->ipa_pdev, params->nhash_bdy.size,
+			&params->nhash_bdy.phys_base, GFP_KERNEL);
+		if (!params->nhash_bdy.base) {
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->nhash_bdy.size);
+			return -ENOMEM;
+		}
+		memset(params->nhash_bdy.base, 0, params->nhash_bdy.size);
+	}
+
+	if (!obj->support_hash && params->hash_bdy.size) {
+		IPAHAL_ERR("No HAL Hash tbls support - Will be ignored\n");
+		WARN_ON(1);
+	}
+
+	if (obj->support_hash && params->hash_bdy.size) {
+		params->hash_bdy.size = params->total_sz_lcl_hash_tbls;
+		/* for table terminator */
+		params->hash_bdy.size += obj->tbl_width *
+			params->num_lcl_hash_tbls;
+		/* align the start of local rule-set */
+		params->hash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_hash_tbls;
+		/* SRAM block size alignment */
+		params->hash_bdy.size += obj->blk_sz_alignment;
+		params->hash_bdy.size &= ~(obj->blk_sz_alignment);
+
+		IPAHAL_DBG("hash lcl tbl bdy total h/w size = %u\n",
+			params->hash_bdy.size);
+
+		params->hash_bdy.base = dma_alloc_coherent(
+			ipahal_ctx->ipa_pdev, params->hash_bdy.size,
+			&params->hash_bdy.phys_base, GFP_KERNEL);
+		if (!params->hash_bdy.base) {
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->hash_bdy.size);
+			goto hash_bdy_fail;
+		}
+		memset(params->hash_bdy.base, 0, params->hash_bdy.size);
+	}
+
+	return 0;
+
+hash_bdy_fail:
+	if (params->nhash_bdy.size)
+		ipahal_free_dma_mem(&params->nhash_bdy);
+
+	return -ENOMEM;
+}
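+
+/*
+ * Worked example of the body size computation above, assuming the IPA3.0
+ * values from ipahal_fltrt_i.h (tbl_width = 8, lcladdr_alignment mask = 7,
+ * blk_sz_alignment mask = 127) and two local non-hashable tables with a
+ * total effective size of 200 bytes:
+ *
+ *	size = 200;			// effective rules size
+ *	size += 8 * 2;			// one terminator word per table
+ *	size += 7 * 2;			// worst-case start-alignment padding
+ *	size = (size + 127) & ~127;	// round up to SRAM block size
+ *	// size == 256 bytes of DMA memory allocated for the bodies
+ */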
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ *  Used usually during commit.
+ *  Allocates header structures and inits them to point to the empty DDR table
+ *  Allocates body structures for local body tables
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	IPAHAL_DBG("Entry\n");
+
+	/* Input validation */
+	if (!params) {
+		IPAHAL_ERR("Input err: no params\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	if (ipa_fltrt_alloc_init_tbl_hdr(params)) {
+		IPAHAL_ERR("fail to alloc and init tbl hdr\n");
+		return -ENOMEM;
+	}
+
+	if (ipa_fltrt_alloc_lcl_bdy(params)) {
+		IPAHAL_ERR("fail to alloc tbl bodies\n");
+		goto bdy_alloc_fail;
+	}
+
+	return 0;
+
+bdy_alloc_fail:
+	ipahal_free_dma_mem(&params->nhash_hdr);
+	if (params->hash_hdr.size)
+		ipahal_free_dma_mem(&params->hash_hdr);
+	return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. size for effective table size. Pointer, for the
+ *  allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (!tbl_mem) {
+		IPAHAL_ERR("Input err\n");
+		return -EINVAL;
+	}
+
+	if (!tbl_mem->size) {
+		IPAHAL_ERR("Input err: zero table size\n");
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	/* add word for rule-set terminator */
+	tbl_mem->size += obj->tbl_width;
+
+	tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size,
+		&tbl_mem->phys_base, GFP_KERNEL);
+	if (!tbl_mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buf of size %d\n",
+			tbl_mem->size);
+		return -ENOMEM;
+	}
+	if (tbl_mem->phys_base & obj->sysaddr_alignment) {
+		IPAHAL_ERR("sys tbl address is not aligned\n");
+		goto align_err;
+	}
+
+	memset(tbl_mem->base, 0, tbl_mem->size);
+
+	return 0;
+
+align_err:
+	ipahal_free_dma_mem(tbl_mem);
+	return -EPERM;
+}
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ *  Given table addr/offset, adapt it to IPA H/W format and write it
+ *  to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+	bool is_sys)
+{
+	struct ipahal_fltrt_obj *obj;
+	u64 hwaddr;
+	u8 *hdr;
+
+	IPAHAL_DBG("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!addr || !hdr_base) {
+		IPAHAL_ERR("Input err: addr=0x%llx hdr_base=%p\n",
+			addr, hdr_base);
+		return -EINVAL;
+	}
+
+	hdr = (u8 *)hdr_base;
+	hdr += hdr_idx * obj->tbl_hdr_width;
+	hwaddr = obj->create_tbl_addr(is_sys, addr);
+	obj->write_val_to_hdr(hwaddr, hdr);
+
+	return 0;
+}
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given an sram address, read its
+ *  content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+	bool *is_sys)
+{
+	struct ipahal_fltrt_obj *obj;
+	u64 hwaddr;
+	u8 *hdr;
+
+	IPAHAL_DBG("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!addr || !hdr_base || !is_sys) {
+		IPAHAL_ERR("Input err: addr=%p hdr_base=%p is_sys=%p\n",
+			addr, hdr_base, is_sys);
+		return -EINVAL;
+	}
+
+	hdr = (u8 *)hdr_base;
+	hdr += hdr_idx * obj->tbl_hdr_width;
+	hwaddr = *((u64 *)hdr);
+	obj->parse_tbl_addr(hwaddr, addr, is_sys);
+	return 0;
+}
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *tmp = NULL;
+	int rc;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (!params || !hw_len) {
+		IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+		return -EINVAL;
+	}
+	if (!params->rule) {
+		IPAHAL_ERR("Input err: invalid rule\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (buf == NULL) {
+		tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+		if (!tmp) {
+			IPAHAL_ERR("failed to alloc %u bytes\n",
+				obj->rule_buf_size);
+			return -ENOMEM;
+		}
+		buf = tmp;
+	} else if ((long)buf & obj->rule_start_alignment) {
+		IPAHAL_ERR("buf is not rule start aligned\n");
+		return -EPERM;
+	}
+
+	rc = ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_generate_hw_rule(
+		params, hw_len, buf);
+	if (!tmp && !rc) {
+		/* write the rule-set terminator */
+		memset(buf + *hw_len, 0, obj->tbl_width);
+	}
+
+	kfree(tmp);
+
+	return rc;
+}
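+
+/*
+ * Illustrative two-pass usage of the rule generation API (caller sketch;
+ * 'params' and 'dst' are hypothetical): call first with buf == NULL to
+ * learn the rule size, then build the rule into a destination buffer that
+ * honors the H/W rule start alignment and leaves room for the terminator
+ * word written after the rule.
+ *
+ *	u32 len;
+ *
+ *	if (ipahal_rt_generate_hw_rule(&params, &len, NULL))
+ *		goto fail;
+ *	if (ipahal_rt_generate_hw_rule(&params, &len, dst))
+ *		goto fail;
+ */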
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *tmp = NULL;
+	int rc;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (!params || !hw_len) {
+		IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+		return -EINVAL;
+	}
+	if (!params->rule) {
+		IPAHAL_ERR("Input err: invalid rule\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (buf == NULL) {
+		tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+		if (!tmp) {
+			IPAHAL_ERR("failed to alloc %u bytes\n",
+				obj->rule_buf_size);
+			return -ENOMEM;
+		}
+		buf = tmp;
+	} else if ((long)buf & obj->rule_start_alignment) {
+		IPAHAL_ERR("buf is not rule start aligned\n");
+		return -EPERM;
+	}
+
+	rc = ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_hw_rule(
+		params, hw_len, buf);
+	if (!tmp && !rc) {
+		/* write the rule-set terminator */
+		memset(buf + *hw_len, 0, obj->tbl_width);
+	}
+
+	kfree(tmp);
+
+	return rc;
+}
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ *  Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ *  for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	IPAHAL_DBG("Entry\n");
+
+	if (ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt);
+		return -EINVAL;
+	}
+
+	if (!attrib || !eq_atrb) {
+		IPAHAL_ERR("Input err: attrib=%p eq_atrb=%p\n",
+			attrib, eq_atrb);
+		return -EINVAL;
+	}
+
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_eq(ipt,
+		attrib, eq_atrb);
+}
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_rt_rule_entry *rule)
+{
+	IPAHAL_DBG("Entry\n");
+
+	if (!rule_addr || !rule) {
+		IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+			rule_addr, rule);
+		return -EINVAL;
+	}
+
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_parse_hw_rule(
+		rule_addr, rule);
+}
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_flt_rule_entry *rule)
+{
+	IPAHAL_DBG("Entry\n");
+
+	if (!rule_addr || !rule) {
+		IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+			rule_addr, rule);
+		return -EINVAL;
+	}
+
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_parse_hw_rule(
+		rule_addr, rule);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
new file mode 100644
index 0000000..ee2704d6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
@@ -0,0 +1,288 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_H_
+#define _IPAHAL_FLTRT_H_
+
+/*
+ * struct ipahal_fltrt_alloc_imgs_params - Params for tbls imgs allocations
+ *  The allocation logic will allocate DMA memory representing the header.
+ *  If the bodies are local (SRAM), the allocation will allocate
+ *  DMA buffers that will contain the content of these local tables in raw form
+ * @ipt: IP version type
+ * @tbls_num: Number of tables to represent by the header
+ * @num_lcl_hash_tbls: Number of local (sram) hashable tables
+ * @num_lcl_nhash_tbls: Number of local (sram) non-hashable tables
+ * @total_sz_lcl_hash_tbls: Total size of local hashable tables
+ * @total_sz_lcl_nhash_tbls: Total size of local non-hashable tables
+ * @hash_hdr/nhash_hdr: OUT params for the header structures
+ * @hash_bdy/nhash_bdy: OUT params for the local body structures
+ */
+struct ipahal_fltrt_alloc_imgs_params {
+	enum ipa_ip_type ipt;
+	u32 tbls_num;
+	u32 num_lcl_hash_tbls;
+	u32 num_lcl_nhash_tbls;
+	u32 total_sz_lcl_hash_tbls;
+	u32 total_sz_lcl_nhash_tbls;
+
+	/* OUT PARAMS */
+	struct ipa_mem_buffer hash_hdr;
+	struct ipa_mem_buffer nhash_hdr;
+	struct ipa_mem_buffer hash_bdy;
+	struct ipa_mem_buffer nhash_bdy;
+};
+
+/*
+ * enum ipahal_rt_rule_hdr_type - Header type used in rt rules
+ * @IPAHAL_RT_RULE_HDR_NONE: No header is used
+ * @IPAHAL_RT_RULE_HDR_RAW: Raw header is used
+ * @IPAHAL_RT_RULE_HDR_PROC_CTX: Header Processing context is used
+ */
+enum ipahal_rt_rule_hdr_type {
+	IPAHAL_RT_RULE_HDR_NONE,
+	IPAHAL_RT_RULE_HDR_RAW,
+	IPAHAL_RT_RULE_HDR_PROC_CTX,
+};
+
+/*
+ * struct ipahal_rt_rule_gen_params - Params for generating rt rule
+ * @ipt: IP family version
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_type: Header type to be used
+ * @hdr_lcl: Is the header located in the local or the system table?
+ * @hdr_ofst: Offset of the header in the header table
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_rt_rule_gen_params {
+	enum ipa_ip_type ipt;
+	int dst_pipe_idx;
+	enum ipahal_rt_rule_hdr_type hdr_type;
+	bool hdr_lcl;
+	u32 hdr_ofst;
+	u32 priority;
+	u32 id;
+	const struct ipa_rt_rule *rule;
+};
+
+/*
+ * struct ipahal_rt_rule_entry - Rt rule info parsed from H/W
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_lcl: Is the referenced header located in sram or system mem?
+ * @hdr_ofst: Offset of the header in the header table
+ * @hdr_type: Header type to be used
+ * @priority: Rule priority
+ * @retain_hdr: to retain the removed header in header removal
+ * @id: Rule ID
+ * @eq_attrib: Equations and their params in the rule
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_rt_rule_entry {
+	int dst_pipe_idx;
+	bool hdr_lcl;
+	u32 hdr_ofst;
+	enum ipahal_rt_rule_hdr_type hdr_type;
+	u32 priority;
+	bool retain_hdr;
+	u32 id;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	u32 rule_size;
+};
+
+/*
+ * struct ipahal_flt_rule_gen_params - Params for generating flt rule
+ * @ipt: IP family version
+ * @rt_tbl_idx: Routing table the rule points to
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_flt_rule_gen_params {
+	enum ipa_ip_type ipt;
+	u32 rt_tbl_idx;
+	u32 priority;
+	u32 id;
+	const struct ipa_flt_rule *rule;
+};
+
+/*
+ * struct ipahal_flt_rule_entry - Flt rule info parsed from H/W
+ * @rule: Rule info
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_flt_rule_entry {
+	struct ipa_flt_rule rule;
+	u32 priority;
+	u32 id;
+	u32 rule_size;
+};
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void);
+
+/* Get the H/W local table (SRAM) address alignment
+ * Table headers reference local tables via offsets in SRAM.
+ * This function returns the alignment of the offset that IPA expects
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void);
+
+/*
+ * Rule priority is used to distinguish the order of rules in the
+ * integrated table consisting of hashable and non-hashable tables.
+ * A max-priority rule is used as soon as IPA scans it; IPA will not
+ * look for further matching rules.
+ */
+int ipahal_get_rule_max_priority(void);
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio);
+
+/* Does the given ID represent a rule miss? */
+bool ipahal_is_rule_miss_id(u32 id);
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void);
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void);
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ *  Creates routing header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ *  Creates filter header buffer for the given tables number.
+ *  For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ *  should be: bit0->EP0, bit1->EP1
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ *  Used usually during commit.
+ *  Allocates header structures and inits them to point to the empty DDR table
+ *  Allocates body structures for local body tables
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+	struct ipahal_fltrt_alloc_imgs_params *params);
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. size for effective table size. Pointer, for the
+ *  allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem);
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ *  Given table addr/offset, adapt it to IPA H/W format and write it
+ *  to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+	bool is_sys);
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given an sram address, read its
+ *  content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+	bool *is_sys);
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ *  Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ *  for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_rt_rule_entry *rule);
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_flt_rule_entry *rule);
+
+
+#endif /* _IPAHAL_FLTRT_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
new file mode 100644
index 0000000..0c0637d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
@@ -0,0 +1,143 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_I_H_
+#define _IPAHAL_FLTRT_I_H_
+
+/*
+ * enum ipa_fltrt_equations - RULE equations
+ *  These are names values to the equations that can be used
+ *  The HAL layer holds mapping between these names and H/W
+ *  presentation.
+ */
+enum ipa_fltrt_equations {
+	IPA_TOS_EQ,
+	IPA_PROTOCOL_EQ,
+	IPA_TC_EQ,
+	IPA_OFFSET_MEQ128_0,
+	IPA_OFFSET_MEQ128_1,
+	IPA_OFFSET_MEQ32_0,
+	IPA_OFFSET_MEQ32_1,
+	IPA_IHL_OFFSET_MEQ32_0,
+	IPA_IHL_OFFSET_MEQ32_1,
+	IPA_METADATA_COMPARE,
+	IPA_IHL_OFFSET_RANGE16_0,
+	IPA_IHL_OFFSET_RANGE16_1,
+	IPA_IHL_OFFSET_EQ_32,
+	IPA_IHL_OFFSET_EQ_16,
+	IPA_FL_EQ,
+	IPA_IS_FRAG,
+	IPA_EQ_MAX,
+};
+
+/* Width and Alignment values for H/W structures.
+ * Specific for IPA version.
+ */
+#define IPA3_0_HW_TBL_SYSADDR_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_LCLADDR_ALIGNMENT (7)
+#define IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_WIDTH (8)
+#define IPA3_0_HW_TBL_HDR_WIDTH (8)
+#define IPA3_0_HW_TBL_ADDR_MASK (127)
+#define IPA3_0_HW_RULE_BUF_SIZE (256)
+#define IPA3_0_HW_RULE_START_ALIGNMENT (7)
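+
+/*
+ * Note: the *_ALIGNMENT and *_ADDR_MASK values above are bit masks, not
+ * byte counts. For example, a system table address is properly aligned
+ * when (addr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) == 0 (128B boundary), and
+ * a block size is rounded up with:
+ *
+ *	size = (size + IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT) &
+ *		~IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT;
+ */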
+
+
+/*
+ * Rules Priority.
+ * Needed due to rules classification to hashable and non-hashable.
+ * Higher priority is lower in number. i.e. 0 is highest priority
+ */
+#define IPA3_0_RULE_MAX_PRIORITY (0)
+#define IPA3_0_RULE_MIN_PRIORITY (1023)
+
+/*
+ * RULE ID, bit length (e.g. 10 bits).
+ */
+#define IPA3_0_RULE_ID_BIT_LEN (10)
+#define IPA3_0_LOW_RULE_ID (1)
+
+/**
+ * struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule header properties
+ * @en_rule: enable rule - Equation bit fields
+ * @pipe_dest_idx: destination pipe index
+ * @system: Is the referenced header in lcl or sys memory
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ *	header processing context table
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting from hashable and
+ *  non-hashable parts
+ * @rsvd1: reserved bits
+ * @retain_hdr: added to add back to the packet the header removed
+ *  as part of header removal. This will be done as part of
+ *  header insertion block.
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd2: reserved bits
+ */
+struct ipa3_0_rt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 pipe_dest_idx:5;
+			u64 system:1;
+			u64 hdr_offset:9;
+			u64 proc_ctx:1;
+			u64 priority:10;
+			u64 rsvd1:5;
+			u64 retain_hdr:1;
+			u64 rule_id:10;
+			u64 rsvd2:6;
+		} hdr;
+	} u;
+};
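+
+/*
+ * Note on hdr_offset units, as interpreted by ipa_rt_parse_hw_rule() in
+ * ipahal_fltrt.c: when proc_ctx is set, the offset is in 32-byte units
+ * (hdr_offset << 5 gives bytes); otherwise it indexes the raw header
+ * table in 4-byte units (hdr_offset << 2 gives bytes).
+ */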
+
+/**
+ * struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: added to add back to the packet the header removed
+ *  as part of header removal. This will be done as part of
+ *  header insertion block.
+ * @rsvd1: reserved bits
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting from hashable and
+ *  non-hashable parts
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd3: reserved bits
+ */
+struct ipa3_0_flt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 action:5;
+			u64 rt_tbl_idx:5;
+			u64 retain_hdr:1;
+			u64 rsvd1:5;
+			u64 priority:10;
+			u64 rsvd2:6;
+			u64 rule_id:10;
+			u64 rsvd3:6;
+		} hdr;
+	} u;
+};
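+
+/*
+ * Action field encoding, as interpreted by ipa_flt_parse_hw_rule() in
+ * ipahal_fltrt.c:
+ *	0x0 - pass to routing
+ *	0x1 - pass to source NAT
+ *	0x2 - pass to destination NAT
+ *	0x3 - pass to exception
+ */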
+
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type);
+void ipahal_fltrt_destroy(void);
+
+#endif /* _IPAHAL_FLTRT_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
new file mode 100644
index 0000000..4c4b666
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -0,0 +1,549 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_I_H_
+#define _IPAHAL_I_H_
+
+#include <linux/ipa.h>
+#include "../../ipa_common_i.h"
+
+#define IPAHAL_DRV_NAME "ipahal"
+
+#define IPAHAL_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAHAL_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAHAL_ERR(fmt, args...) \
+	do { \
+		pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+/*
+ * struct ipahal_context - HAL global context data
+ * @hw_type: IPA H/W type/version.
+ * @base: Base address to be used for accessing IPA memory. This is
+ *  I/O memory mapped address.
+ *  Controlled by debugfs. default is off
+ * @dent: Debugfs folder dir entry
+ * @ipa_pdev: IPA Platform Device. Will be used for DMA memory
+ * @empty_fltrt_tbl: Empty table to be used at tables init.
+ */
+struct ipahal_context {
+	enum ipa_hw_type hw_type;
+	void __iomem *base;
+	struct dentry *dent;
+	struct device *ipa_pdev;
+	struct ipa_mem_buffer empty_fltrt_tbl;
+};
+
+extern struct ipahal_context *ipahal_ctx;
+
+
+
+/* Immediate commands H/W structures */
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload
+ *  in H/W format.
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_filter_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload
+ *  in H/W format.
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_filter_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload
+ *  in H/W format.
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions, location,
+ *  cache address and other related parameters.
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
+ *  table starts. IPv4 NAT rules that result in NAT collision are located
+ *  in this table.
+ * @index_table_addr: Addr in sys/shared mem where index table, which points
+ *  to NAT table starts
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ *  table starts
+ * @table_index: For future support of multiple NAT tables
+ * @rsvd1: reserved
+ * @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem
+ * @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in
+ *  sys or shared mem
+ * @index_table_addr_type: index_table_addr in sys or shared mem
+ * @index_table_expansion_addr_type: index_table_expansion_addr in
+ *  sys or shared mem
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ *  idx tbl (each)
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_imm_cmd_hw_ip_v4_nat_init {
+	u64 ipv4_rules_addr:64;
+	u64 ipv4_expansion_rules_addr:64;
+	u64 index_table_addr:64;
+	u64 index_table_expansion_addr:64;
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 ipv4_rules_addr_type:1;
+	u64 ipv4_expansion_rules_addr_type:1;
+	u64 index_table_addr_type:1;
+	u64 index_table_expansion_addr_type:1;
+	u64 size_base_tables:12;
+	u64 size_expansion_tables:10;
+	u64 rsvd2:2;
+	u64 public_ip_addr:32;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload
+ *  in H/W format.
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_routing_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload
+ *  in H/W format.
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_routing_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload
+ *  in H/W format.
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ * @rsvd: reserved
+ */
+struct ipa_imm_cmd_hw_hdr_init_local {
+	u64 hdr_table_addr:64;
+	u64 size_hdr_table:12;
+	u64 hdr_addr:16;
+	u64 rsvd:4;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload
+ *  in H/W format
+ * Perform DMA operation on NAT related mem addresses. Copy data into
+ *  different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @rsvd1: reserved
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @rsvd2: reserved
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_nat_dma {
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 base_addr:2;
+	u64 rsvd2:2;
+	u64 offset:32;
+	u64 data:16;
+	u64 rsvd3:8;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload
+ *  in H/W format.
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipa_imm_cmd_hw_hdr_init_system {
+	u64 hdr_table_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload
+ *  in H/W format.
+ * Configuration for specific IP pkt. Shall be called prior to an IP pkt
+ *  data. Pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ *  is enabled, this field will overwrite the rt rule)
+ * @rsv1: reserved
+ */
+struct ipa_imm_cmd_hw_ip_packet_init {
+	u64 destination_pipe_index:5;
+	u64 rsv1:59;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload
+ *  in H/W format.
+ * Write value to register. Allows reg changes to be synced with data packet
+ *  and other immediate command. Can be used to access the sram
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @pipeline_clear_options: options for pipeline to clear
+ *	0: HPS - no pkt inside HPS (not grp specific)
+ *	1: source group - The immediate cmd src grp does not use any pkt ctxs
+ *	2: Wait until no pkt reside inside IPA pipeline
+ *	3: reserved
+ * @rsvd: reserved - should be set to zero
+ */
+struct ipa_imm_cmd_hw_register_write {
+	u64 sw_rsvd:15;
+	u64 skip_pipeline_clear:1;
+	u64 offset:16;
+	u64 value:32;
+	u64 value_mask:32;
+	u64 pipeline_clear_options:2;
+	u64 rsvd:30;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
+ *  in H/W format.
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @direction: Read or write?
+ *	0: IPA write, Write to local address from system address
+ *	1: IPA read, Read from local address to system address
+ * @skip_pipeline_clear: 0: wait until IPA pipeline is clear. 1: don't wait
+ * @pipeline_clear_options: options for pipeline to clear
+ *	0: HPS - no pkt inside HPS (not grp specific)
+ *	1: source group - The immediate cmd src grp does not use any pkt ctxs
+ *	2: Wait until no pkt reside inside IPA pipeline
+ *	3: reserved
+ * @rsvd: reserved - should be set to zero
+ * @system_addr: Address in system memory
+ */
+struct ipa_imm_cmd_hw_dma_shared_mem {
+	u64 sw_rsvd:16;
+	u64 size:16;
+	u64 local_addr:16;
+	u64 direction:1;
+	u64 skip_pipeline_clear:1;
+	u64 pipeline_clear_options:2;
+	u64 rsvd:12;
+	u64 system_addr:64;
+};
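+
+/*
+ * Illustrative sketch (not part of the original snapshot): one way S/W could
+ * fill a DMA_SHARED_MEM payload to copy a buffer from system memory into IPA
+ * local memory. Field meanings follow the kernel-doc above; the helper name
+ * and parameter choices are assumptions made purely for illustration.
+ */
+static inline void ipa_example_fill_dma_shared_mem(
+	struct ipa_imm_cmd_hw_dma_shared_mem *cmd,
+	u64 sys_addr, u32 local_ofst, u32 size)
+{
+	cmd->sw_rsvd = 0;
+	cmd->size = size;			/* up to 2KB per the doc above */
+	cmd->local_addr = local_ofst;		/* offset in IPA local memory */
+	cmd->direction = 0;			/* 0 = IPA write: system -> local */
+	cmd->skip_pipeline_clear = 0;		/* wait for pipeline clear */
+	cmd->pipeline_clear_options = 0;	/* HPS clear option */
+	cmd->rsvd = 0;
+	cmd->system_addr = sys_addr;
+}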
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_tag_status -
+ *  IP_PACKET_TAG_STATUS command payload in H/W format.
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ *  value that is passed back to SW inside Packet Status information.
+ *  TAG info will be provided as part of Packet Status info generated for
+ *  the next pkt transferred over the pipe.
+ *  This immediate command must be followed by a packet in the same transfer.
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @tag: Tag that is provided back to SW
+ */
+struct ipa_imm_cmd_hw_ip_packet_tag_status {
+	u64 sw_rsvd:16;
+	u64 tag:48;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_task_32b_addr -
+ *	IPA_DMA_TASK_32B_ADDR command payload in H/W format.
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ *  multiple descriptors.
+ *  The Opcode is dynamic, where it holds the number of buffers to process
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @cmplt: Complete flag: When asserted IPA will interrupt SW when the entire
+ *  DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: When asserted IPA will assert the EOT to the
+ *  dest client. This is used for the aggr sequence
+ * @flsh: Flush flag: When asserted, pkt will go through the IPA blocks but
+ *  will not be xfered to dest client but rather will be discarded
+ * @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors
+ *  from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively
+ *  servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ *  only the first one needs to have this field set. It will be ignored
+ *  in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
+ *  must contain this field (2 or more buffers) or EOT.
+ */
+struct ipa_imm_cmd_hw_dma_task_32b_addr {
+	u64 sw_rsvd:11;
+	u64 cmplt:1;
+	u64 eof:1;
+	u64 flsh:1;
+	u64 lock:1;
+	u64 unlock:1;
+	u64 size1:16;
+	u64 addr1:32;
+	u64 packet_size:16;
+};
+
+
+
+/* IPA Status packet H/W structures and info */
+
+/*
+ * struct ipa_status_pkt_hw - IPA status packet payload in H/W format.
+ *  This structure describes the status packet H/W structure for the
+ *   following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ *   IPA_STATUS_SUSPENDED_PACKET.
+ *  Other status types have a different status packet structure.
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: (not bitmask) - the first exception that took place.
+ *  In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask specifying on which H/W blocks the pkt was processed.
+ * @pkt_len: Pkt pyld len including hdr, including retained hdr if used. Does
+ *  not include padding or checksum trailer len.
+ * @endp_src_idx: Source end point index.
+ * @rsvd1: reserved
+ * @endp_dest_idx: Destination end point index.
+ *  Not valid in case of exception
+ * @rsvd2: reserved
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does the matching flt rule belong
+ *  to a flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
+ * @flt_global: Global filter rule flag: Does the matching flt rule belong to
+ *  the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ *  specify to retain the header?
+ * @flt_rule_id: The ID of the matching filter rule. This info can be combined
+ *  with endp_src_idx to locate the exact rule. ID=0x3FF reserved to specify
+ *  flt miss. In case of miss, all flt info is to be ignored
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ *  a rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag.
+ * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
+ * @rt_rule_id: The ID of the matching rt rule. This info can be combined
+ *  with rt_tbl_idx to locate the exact rule. ID=0x3FF reserved to specify
+ *  rt miss. In case of miss, all rt info is to be ignored
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @nat_type: Defines the type of the NAT operation:
+ *	00: No NAT
+ *	01: Source NAT
+ *	10: Destination NAT
+ *	11: Reserved
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ *  taken from the table that resides in local memory? (If not, then system mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ * @hw_specific: H/W specific reserved value
+ */
+struct ipa_pkt_status_hw {
+	u64 status_opcode:8;
+	u64 exception:8;
+	u64 status_mask:16;
+	u64 pkt_len:16;
+	u64 endp_src_idx:5;
+	u64 rsvd1:3;
+	u64 endp_dest_idx:5;
+	u64 rsvd2:3;
+	u64 metadata:32;
+	u64 flt_local:1;
+	u64 flt_hash:1;
+	u64 flt_global:1;
+	u64 flt_ret_hdr:1;
+	u64 flt_rule_id:10;
+	u64 rt_local:1;
+	u64 rt_hash:1;
+	u64 ucp:1;
+	u64 rt_tbl_idx:5;
+	u64 rt_rule_id:10;
+	u64 nat_hit:1;
+	u64 nat_entry_idx:13;
+	u64 nat_type:2;
+	u64 tag_info:48;
+	u64 seq_num:8;
+	u64 time_of_day_ctr:24;
+	u64 hdr_local:1;
+	u64 hdr_offset:10;
+	u64 frag_hit:1;
+	u64 frag_rule:4;
+	u64 hw_specific:16;
+};
+
+/* Size of H/W Packet Status */
+#define IPA3_0_PKT_STATUS_SIZE 32
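+
+/*
+ * Sanity note (illustration, not part of the original snapshot): the
+ * bit-field widths in struct ipa_pkt_status_hw sum to 256 bits, i.e. four
+ * 64-bit words or 32 bytes, matching IPA3_0_PKT_STATUS_SIZE. A compile-time
+ * guard of the following form could be placed in code that parses status
+ * packets (assuming BUILD_BUG_ON() is visible through the usual kernel
+ * headers):
+ */
+static inline void ipa_example_pkt_status_size_check(void)
+{
+	BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) !=
+		IPA3_0_PKT_STATUS_SIZE);
+}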
+
+/* Headers and processing context H/W structures and definitions */
+
+/* uCP command numbers */
+#define IPA_HDR_UCP_802_3_TO_802_3 6
+#define IPA_HDR_UCP_802_3_TO_ETHII 7
+#define IPA_HDR_UCP_ETHII_TO_802_3 8
+#define IPA_HDR_UCP_ETHII_TO_ETHII 9
+
+/* Processing context TLV type */
+#define IPA_PROC_CTX_TLV_TYPE_END 0
+#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
+#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_tlv -
+ * HW structure of IPA processing context header - TLV part
+ * @type: 0 - end type
+ *        1 - header addition type
+ *        3 - processing command type
+ * @length: number of bytes after tlv
+ *        for type:
+ *        0 - needs to be 0
+ *        1 - header addition length
+ *        3 - number of 32B including type and length.
+ * @value: specific value for type
+ *        for type:
+ *        0 - needs to be 0
+ *        1 - header length
+ *        3 - command ID (see IPA_HDR_UCP_* definitions)
+ */
+struct ipa_hw_hdr_proc_ctx_tlv {
+	u32 type:8;
+	u32 length:8;
+	u32 value:16;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_hdr_add -
+ * HW structure of IPA processing context - add header tlv
+ * @tlv: IPA processing context TLV
+ * @hdr_addr: processing context header address
+ */
+struct ipa_hw_hdr_proc_ctx_hdr_add {
+	struct ipa_hw_hdr_proc_ctx_tlv tlv;
+	u32 hdr_addr;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_seq -
+ * IPA processing context header - add header sequence
+ * @hdr_add: add header command
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @cmd: tlv processing command (cmd.type must be 3)
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_tlv cmd;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
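+/*
+ * Illustrative sketch (not part of the original snapshot): populating an
+ * add-header processing context sequence in S/W before copying it to IPA
+ * memory. The helper name is hypothetical and the length/value encodings
+ * simply follow the TLV kernel-doc above; the exact H/W encoding is defined
+ * elsewhere in the driver.
+ */
+static inline void ipa_example_fill_add_hdr_seq(
+	struct ipa_hw_hdr_proc_ctx_add_hdr_seq *seq,
+	u32 hdr_addr, u32 hdr_len, u32 add_len)
+{
+	seq->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+	seq->hdr_add.tlv.length = add_len;	/* header addition length */
+	seq->hdr_add.tlv.value = hdr_len;	/* header length */
+	seq->hdr_add.hdr_addr = hdr_addr;	/* header location in IPA mem */
+	seq->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+	seq->end.length = 0;
+	seq->end.value = 0;
+}
+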
+#endif /* _IPAHAL_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
new file mode 100644
index 0000000..08decd8
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -0,0 +1,1541 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/ipa.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include "ipahal_i.h"
+#include "ipahal_reg.h"
+#include "ipahal_reg_i.h"
+
+static const char *ipareg_name_to_str[IPA_REG_MAX] = {
+	__stringify(IPA_ROUTE),
+	__stringify(IPA_IRQ_STTS_EE_n),
+	__stringify(IPA_IRQ_EN_EE_n),
+	__stringify(IPA_IRQ_CLR_EE_n),
+	__stringify(IPA_IRQ_SUSPEND_INFO_EE_n),
+	__stringify(IPA_SUSPEND_IRQ_EN_EE_n),
+	__stringify(IPA_SUSPEND_IRQ_CLR_EE_n),
+	__stringify(IPA_BCR),
+	__stringify(IPA_ENABLED_PIPES),
+	__stringify(IPA_COMP_SW_RESET),
+	__stringify(IPA_VERSION),
+	__stringify(IPA_TAG_TIMER),
+	__stringify(IPA_COMP_HW_VERSION),
+	__stringify(IPA_SPARE_REG_1),
+	__stringify(IPA_SPARE_REG_2),
+	__stringify(IPA_COMP_CFG),
+	__stringify(IPA_STATE_AGGR_ACTIVE),
+	__stringify(IPA_ENDP_INIT_HDR_n),
+	__stringify(IPA_ENDP_INIT_HDR_EXT_n),
+	__stringify(IPA_ENDP_INIT_AGGR_n),
+	__stringify(IPA_AGGR_FORCE_CLOSE),
+	__stringify(IPA_ENDP_INIT_ROUTE_n),
+	__stringify(IPA_ENDP_INIT_MODE_n),
+	__stringify(IPA_ENDP_INIT_NAT_n),
+	__stringify(IPA_ENDP_INIT_CTRL_n),
+	__stringify(IPA_ENDP_INIT_HOL_BLOCK_EN_n),
+	__stringify(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n),
+	__stringify(IPA_ENDP_INIT_DEAGGR_n),
+	__stringify(IPA_ENDP_INIT_SEQ_n),
+	__stringify(IPA_DEBUG_CNT_REG_n),
+	__stringify(IPA_ENDP_INIT_CFG_n),
+	__stringify(IPA_IRQ_EE_UC_n),
+	__stringify(IPA_ENDP_INIT_HDR_METADATA_MASK_n),
+	__stringify(IPA_ENDP_INIT_HDR_METADATA_n),
+	__stringify(IPA_ENDP_INIT_RSRC_GRP_n),
+	__stringify(IPA_SHARED_MEM_SIZE),
+	__stringify(IPA_SRAM_DIRECT_ACCESS_n),
+	__stringify(IPA_DEBUG_CNT_CTRL_n),
+	__stringify(IPA_UC_MAILBOX_m_n),
+	__stringify(IPA_FILT_ROUT_HASH_FLUSH),
+	__stringify(IPA_SINGLE_NDP_MODE),
+	__stringify(IPA_QCNCM),
+	__stringify(IPA_SYS_PKT_PROC_CNTXT_BASE),
+	__stringify(IPA_LOCAL_PKT_PROC_CNTXT_BASE),
+	__stringify(IPA_ENDP_STATUS_n),
+	__stringify(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n),
+	__stringify(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_45_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_67_RSRC_TYPE_n),
+	__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0),
+	__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1),
+	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0),
+	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1),
+	__stringify(IPA_QSB_MAX_WRITES),
+	__stringify(IPA_QSB_MAX_READS),
+	__stringify(IPA_TX_CFG),
+};
+
+static void ipareg_construct_dummy(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	IPAHAL_ERR("No construct function for %s\n",
+		ipahal_reg_name_str(reg));
+	WARN_ON(1);
+}
+
+static void ipareg_parse_dummy(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	IPAHAL_ERR("No parse function for %s\n",
+		ipahal_reg_name_str(reg));
+	WARN_ON(1);
+}
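+
+/*
+ * Note on the helpers used below (illustration only): the construct/parse
+ * functions translate between abstracted field structures and raw 32-bit
+ * register values via IPA_SETFIELD_IN_REG()/IPA_GETFIELD_FROM_REG() from
+ * ipahal_reg_i.h. Conceptually they behave like:
+ *
+ *	IPA_SETFIELD_IN_REG(reg, field, shft, bmsk):
+ *		(reg) |= ((field) << (shft)) & (bmsk)
+ *	IPA_GETFIELD_FROM_REG(reg, shft, bmsk):
+ *		(((reg) & (bmsk)) >> (shft))
+ *
+ * See ipahal_reg_i.h for the authoritative definitions.
+ */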
+
+static void ipareg_construct_rx_hps_clients_depth1(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(2));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(3));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0_v3_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(0));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(1));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(2));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(3));
+}
+
+static void ipareg_construct_rsrg_grp_xy(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rsrc_grp_cfg *grp =
+		(struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, grp->x_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, grp->x_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, grp->y_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, grp->y_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK);
+}
+
+static void ipareg_construct_rsrg_grp_xy_v3_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rsrc_grp_cfg *grp =
+		(struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, grp->x_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, grp->x_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5);
+
+	/* DST_23 register has only X fields at ipa V3_5 */
+	if (reg == IPA_DST_RSRC_GRP_23_RSRC_TYPE_n)
+		return;
+
+	IPA_SETFIELD_IN_REG(*val, grp->y_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, grp->y_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5);
+}
+
+static void ipareg_construct_hash_cfg_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_fltrt_hash_tuple *tuple =
+		(struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_id,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.protocol,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.meta_data,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->undefined1,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_id,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.protocol,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.meta_data,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->undefined2,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_parse_hash_cfg_n(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_fltrt_hash_tuple *tuple =
+		(struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+	tuple->flt.src_id =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+	tuple->flt.src_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+	tuple->flt.dst_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+	tuple->flt.src_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+	tuple->flt.dst_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+	tuple->flt.protocol =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+	tuple->flt.meta_data =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+	tuple->undefined1 =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+	tuple->rt.src_id =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+	tuple->rt.src_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+	tuple->rt.dst_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+	tuple->rt.src_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+	tuple->rt.dst_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+	tuple->rt.protocol =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+	tuple->rt.meta_data =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+	tuple->undefined2 =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_endp_status_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_ep_cfg_status *ep_status =
+		(struct ipahal_reg_ep_cfg_status *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_en,
+			IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_ep,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_location,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
+}
+
+static void ipareg_construct_qcncm(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_qcncm *qcncm =
+		(struct ipahal_reg_qcncm *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, qcncm->mode_en ? 1 : 0,
+		IPA_QCNCM_MODE_EN_SHFT,
+		IPA_QCNCM_MODE_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qcncm->mode_val,
+		IPA_QCNCM_MODE_VAL_SHFT,
+		IPA_QCNCM_MODE_VAL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qcncm->undefined,
+		0, IPA_QCNCM_MODE_VAL_BMSK);
+}
+
+static void ipareg_parse_qcncm(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_qcncm *qcncm =
+		(struct ipahal_reg_qcncm *)fields;
+
+	memset(qcncm, 0, sizeof(struct ipahal_reg_qcncm));
+	qcncm->mode_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_QCNCM_MODE_EN_SHFT,
+		IPA_QCNCM_MODE_EN_BMSK);
+	qcncm->mode_val = IPA_GETFIELD_FROM_REG(val,
+		IPA_QCNCM_MODE_VAL_SHFT,
+		IPA_QCNCM_MODE_VAL_BMSK);
+	qcncm->undefined = IPA_GETFIELD_FROM_REG(val,
+		0, IPA_QCNCM_UNDEFINED1_BMSK);
+	qcncm->undefined |= IPA_GETFIELD_FROM_REG(val,
+		0, IPA_QCNCM_MODE_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_single_ndp_mode(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_single_ndp_mode *mode =
+		(struct ipahal_reg_single_ndp_mode *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, mode->single_ndp_en ? 1 : 0,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, mode->undefined,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+static void ipareg_parse_single_ndp_mode(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_single_ndp_mode *mode =
+		(struct ipahal_reg_single_ndp_mode *)fields;
+
+	memset(mode, 0, sizeof(struct ipahal_reg_single_ndp_mode));
+	mode->single_ndp_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+	mode->undefined = IPA_GETFIELD_FROM_REG(val,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+static void ipareg_construct_debug_cnt_ctrl_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_debug_cnt_ctrl *dbg_cnt_ctrl =
+		(struct ipahal_reg_debug_cnt_ctrl *)fields;
+	u8 type;
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->en ? 1 : 0,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK);
+
+	switch (dbg_cnt_ctrl->type) {
+	case DBG_CNT_TYPE_IPV4_FLTR:
+		type = 0x0;
+		if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+			IPAHAL_ERR("No FLT global rules\n");
+			WARN_ON(1);
+		}
+		break;
+	case DBG_CNT_TYPE_IPV4_ROUT:
+		type = 0x1;
+		break;
+	case DBG_CNT_TYPE_GENERAL:
+		type = 0x2;
+		break;
+	case DBG_CNT_TYPE_IPV6_FLTR:
+		type = 0x4;
+		if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+			IPAHAL_ERR("No FLT global rules\n");
+			WARN_ON(1);
+		}
+		break;
+	case DBG_CNT_TYPE_IPV6_ROUT:
+		type = 0x5;
+		break;
+	default:
+		IPAHAL_ERR("Invalid dbg_cnt_ctrl type (%d) for %s\n",
+			dbg_cnt_ctrl->type, ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_SETFIELD_IN_REG(*val, type,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->product ? 1 : 0,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->src_pipe,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK);
+
+	if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK);
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK
+			);
+	} else {
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5);
+	}
+}
+
+static void ipareg_parse_shared_mem_size(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_shared_mem_size *smem_sz =
+		(struct ipahal_reg_shared_mem_size *)fields;
+
+	memset(smem_sz, 0, sizeof(struct ipahal_reg_shared_mem_size));
+	smem_sz->shared_mem_sz = IPA_GETFIELD_FROM_REG(val,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK);
+
+	smem_sz->shared_mem_baddr = IPA_GETFIELD_FROM_REG(val,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+		(struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n_v3_5(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+		(struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_metadata *metadata =
+		(struct ipa_ep_cfg_metadata *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, metadata->qmap_id,
+			IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT,
+			IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_mask_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_metadata_mask *metadata_mask =
+		(struct ipa_ep_cfg_metadata_mask *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, metadata_mask->metadata_mask,
+			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
+			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
+}
+
+static void ipareg_construct_endp_init_cfg_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_cfg *cfg =
+		(struct ipa_ep_cfg_cfg *)fields;
+	u32 cs_offload_en;
+
+	switch (cfg->cs_offload_en) {
+	case IPA_DISABLE_CS_OFFLOAD:
+		cs_offload_en = 0;
+		break;
+	case IPA_ENABLE_CS_OFFLOAD_UL:
+		cs_offload_en = 1;
+		break;
+	case IPA_ENABLE_CS_OFFLOAD_DL:
+		cs_offload_en = 2;
+		break;
+	default:
+		IPAHAL_ERR("Invalid cs_offload_en value for %s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_SETFIELD_IN_REG(*val, cfg->frag_offload_en ? 1 : 0,
+			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
+			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cs_offload_en,
+			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cfg->cs_metadata_hdr_offset,
+			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cfg->gen_qmb_master_sel,
+			IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK);
+}
+
+static void ipareg_construct_endp_init_deaggr_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_deaggr *ep_deaggr =
+		(struct ipa_ep_cfg_deaggr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->deaggr_hdr_len,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_valid,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_location,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->max_packet_len,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_en_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_holb *ep_holb =
+		(struct ipa_ep_cfg_holb *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_holb->en,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_timer_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_holb *ep_holb =
+		(struct ipa_ep_cfg_holb *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_holb->tmr_val,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK);
+}
+
+static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_ctrl *ep_ctrl =
+		(struct ipa_ep_cfg_ctrl *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_suspend,
+		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
+static void ipareg_construct_endp_init_nat_n(enum ipahal_reg_name reg,
+		const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_nat *ep_nat =
+		(struct ipa_ep_cfg_nat *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_nat->nat_en,
+		IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+		IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_mode_n(enum ipahal_reg_name reg,
+		const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_mode *init_mode =
+		(struct ipahal_reg_endp_init_mode *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, init_mode->ep_mode.mode,
+		IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+		IPA_ENDP_INIT_MODE_n_MODE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, init_mode->dst_pipe_number,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+}
+
+static void ipareg_construct_endp_init_route_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_route *ep_init_rt =
+		(struct ipahal_reg_endp_init_route *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_init_rt->route_table_index,
+		IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+		IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK);
+}
+
+static void ipareg_parse_endp_init_aggr_n(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipa_ep_cfg_aggr *ep_aggr =
+		(struct ipa_ep_cfg_aggr *)fields;
+
+	memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr));
+
+	ep_aggr->aggr_en =
+		(((val & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT)
+			== IPA_ENABLE_AGGR);
+	ep_aggr->aggr =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT);
+	ep_aggr->aggr_byte_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT);
+	ep_aggr->aggr_time_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT);
+	ep_aggr->aggr_pkt_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT);
+	ep_aggr->aggr_sw_eof_active =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT);
+	ep_aggr->aggr_hard_byte_limit_en =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK)
+			>>
+			IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT);
+}
+
+static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_aggr *ep_aggr =
+		(struct ipa_ep_cfg_aggr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_byte_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_time_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_pkt_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_sw_eof_active,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
+
+	/* At IPAv3 hard_byte_limit is not supported */
+	ep_aggr->aggr_hard_byte_limit_en = 0;
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_hard_byte_limit_en,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_hdr_ext *ep_hdr_ext;
+	u8 hdr_endianness;
+
+	ep_hdr_ext = (struct ipa_ep_cfg_hdr_ext *)fields;
+	hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_pad_to_alignment,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_offset,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_payload_len_inc_padding,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_valid,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, hdr_endianness,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_hdr *ep_hdr;
+
+	ep_hdr = (struct ipa_ep_cfg_hdr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_metadata_reg_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2,
+		IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK);
+}
+
+static void ipareg_construct_route(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_route *route;
+
+	route = (struct ipahal_reg_route *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, route->route_dis,
+		IPA_ROUTE_ROUTE_DIS_SHFT,
+		IPA_ROUTE_ROUTE_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_pipe,
+		IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+		IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_table,
+		IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+		IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_ofst,
+		IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+		IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_frag_def_pipe,
+		IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
+		IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_retain_hdr,
+		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT,
+		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK);
+}
+
+static void ipareg_construct_qsb_max_writes(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	int *qsb_max_writes = (int *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, qsb_max_writes[0],
+			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qsb_max_writes[1],
+			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK);
+}
+
+static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	int *qsb_max_reads = (int *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, qsb_max_reads[0],
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qsb_max_reads[1],
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
+}
+
+static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_disable,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_disable,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->prefetch_almost_empty_size,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+}
+
+/*
+ * struct ipahal_reg_obj - Register H/W information for specific IPA version
+ * @construct - CB to construct register value from abstracted structure
+ * @parse - CB to parse register value to abstracted structure
+ * @offset - register offset relative to base address
+ * @n_ofst - N parameterized register sub-offset
+ */
+struct ipahal_reg_obj {
+	void (*construct)(enum ipahal_reg_name reg, const void *fields,
+		u32 *val);
+	void (*parse)(enum ipahal_reg_name reg, void *fields,
+		u32 val);
+	u32 offset;
+	u32 n_ofst;
+};
+
+/*
+ * This table contains the info regarding each register for IPAv3 and later.
+ * Information like: offset and construct/parse functions.
+ * All the information for the registers on IPAv3 is statically defined below.
+ * If information is missing for some register on some IPA version,
+ *  the init function will fill it in with the information from the previous
+ *  IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * If offset is -1, this means that the register is removed on the
+ *  specific version.
+ */
+static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0][IPA_ROUTE] = {
+		ipareg_construct_route, ipareg_parse_dummy,
+		0x00000048, 0},
+	[IPA_HW_v3_0][IPA_IRQ_STTS_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003008, 0x1000},
+	[IPA_HW_v3_0][IPA_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000300c, 0x1000},
+	[IPA_HW_v3_0][IPA_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003010, 0x1000},
+	[IPA_HW_v3_0][IPA_IRQ_SUSPEND_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003098, 0x1000},
+	[IPA_HW_v3_0][IPA_BCR] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001D0, 0},
+	[IPA_HW_v3_0][IPA_ENABLED_PIPES] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000038, 0},
+	[IPA_HW_v3_0][IPA_COMP_SW_RESET] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000040, 0},
+	[IPA_HW_v3_0][IPA_VERSION] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000034, 0},
+	[IPA_HW_v3_0][IPA_TAG_TIMER] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000060, 0 },
+	[IPA_HW_v3_0][IPA_COMP_HW_VERSION] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000030, 0},
+	[IPA_HW_v3_0][IPA_SPARE_REG_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00005090, 0},
+	[IPA_HW_v3_0][IPA_SPARE_REG_2] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00005094, 0},
+	[IPA_HW_v3_0][IPA_COMP_CFG] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000003C, 0},
+	[IPA_HW_v3_0][IPA_STATE_AGGR_ACTIVE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000010C, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_n] = {
+		ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy,
+		0x00000810, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_EXT_n] = {
+		ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy,
+		0x00000814, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_AGGR_n] = {
+		ipareg_construct_endp_init_aggr_n,
+		ipareg_parse_endp_init_aggr_n,
+		0x00000824, 0x70},
+	[IPA_HW_v3_0][IPA_AGGR_FORCE_CLOSE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001EC, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_ROUTE_n] = {
+		ipareg_construct_endp_init_route_n, ipareg_parse_dummy,
+		0x00000828, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_MODE_n] = {
+		ipareg_construct_endp_init_mode_n, ipareg_parse_dummy,
+		0x00000820, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_NAT_n] = {
+		ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
+		0x0000080C, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = {
+		ipareg_construct_endp_init_ctrl_n, ipareg_parse_dummy,
+		0x00000800, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = {
+		ipareg_construct_endp_init_hol_block_en_n,
+		ipareg_parse_dummy,
+		0x0000082c, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = {
+		ipareg_construct_endp_init_hol_block_timer_n,
+		ipareg_parse_dummy,
+		0x00000830, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_DEAGGR_n] = {
+		ipareg_construct_endp_init_deaggr_n,
+		ipareg_parse_dummy,
+		0x00000834, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_SEQ_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000083C, 0x70},
+	[IPA_HW_v3_0][IPA_DEBUG_CNT_REG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000600, 0x4},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_CFG_n] = {
+		ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy,
+		0x00000808, 0x70},
+	[IPA_HW_v3_0][IPA_IRQ_EE_UC_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000301c, 0x1000},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = {
+		ipareg_construct_endp_init_hdr_metadata_mask_n,
+		ipareg_parse_dummy,
+		0x00000818, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_n] = {
+		ipareg_construct_endp_init_hdr_metadata_n,
+		ipareg_parse_dummy,
+		0x0000081c, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_RSRC_GRP_n] = {
+		ipareg_construct_endp_init_rsrc_grp_n,
+		ipareg_parse_dummy,
+		0x00000838, 0x70},
+	[IPA_HW_v3_0][IPA_SHARED_MEM_SIZE] = {
+		ipareg_construct_dummy, ipareg_parse_shared_mem_size,
+		0x00000054, 0},
+	[IPA_HW_v3_0][IPA_SRAM_DIRECT_ACCESS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00007000, 0x4},
+	[IPA_HW_v3_0][IPA_DEBUG_CNT_CTRL_n] = {
+		ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy,
+		0x00000640, 0x4},
+	[IPA_HW_v3_0][IPA_UC_MAILBOX_m_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00032000, 0x4},
+	[IPA_HW_v3_0][IPA_FILT_ROUT_HASH_FLUSH] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000090, 0},
+	[IPA_HW_v3_0][IPA_SINGLE_NDP_MODE] = {
+		ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
+		0x00000068, 0},
+	[IPA_HW_v3_0][IPA_QCNCM] = {
+		ipareg_construct_qcncm, ipareg_parse_qcncm,
+		0x00000064, 0},
+	[IPA_HW_v3_0][IPA_SYS_PKT_PROC_CNTXT_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001e0, 0},
+	[IPA_HW_v3_0][IPA_LOCAL_PKT_PROC_CNTXT_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001e8, 0},
+	[IPA_HW_v3_0][IPA_ENDP_STATUS_n] = {
+		ipareg_construct_endp_status_n, ipareg_parse_dummy,
+		0x00000840, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = {
+		ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n,
+		0x0000085C, 0x70},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000400, 0x20},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000404, 0x20},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000408, 0x20},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x0000040C, 0x20},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000500, 0x20},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000504, 0x20},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000508, 0x20},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x0000050c, 0x20},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+		0x000023C4, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+		ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+		0x000023C8, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+		0x000023CC, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+		ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+		0x000023D0, 0},
+	[IPA_HW_v3_0][IPA_QSB_MAX_WRITES] = {
+		ipareg_construct_qsb_max_writes, ipareg_parse_dummy,
+		0x00000074, 0},
+	[IPA_HW_v3_0][IPA_QSB_MAX_READS] = {
+		ipareg_construct_qsb_max_reads, ipareg_parse_dummy,
+		0x00000078, 0},
+
+
+	/* IPAv3.1 */
+	[IPA_HW_v3_1][IPA_IRQ_SUSPEND_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003030, 0x1000},
+	[IPA_HW_v3_1][IPA_SUSPEND_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003034, 0x1000},
+	[IPA_HW_v3_1][IPA_SUSPEND_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003038, 0x1000},
+
+
+	/* IPAv3.5 */
+	[IPA_HW_v3_5][IPA_TX_CFG] = {
+		ipareg_construct_tx_cfg, ipareg_parse_dummy,
+		0x000001FC, 0},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000400, 0x20},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000404, 0x20},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000500, 0x20},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000504, 0x20},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_ENDP_INIT_RSRC_GRP_n] = {
+		ipareg_construct_endp_init_rsrc_grp_n_v3_5,
+		ipareg_parse_dummy,
+		0x00000838, 0x70},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0_v3_5,
+		ipareg_parse_dummy,
+		0x000023C4, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0_v3_5,
+		ipareg_parse_dummy,
+		0x000023CC, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_SPARE_REG_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002780, 0},
+	[IPA_HW_v3_5][IPA_SPARE_REG_2] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002784, 0},
+};
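+
+/*
+ * For example: IPA_ROUTE is defined only for IPA_HW_v3_0 in the table above,
+ * so ipahal_reg_init() below propagates that entry unchanged to v3.1 and
+ * v3.5, whereas IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 is explicitly given offset -1
+ * at v3.5 to mark the register as removed on that version.
+ */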
+
+/*
+ * ipahal_reg_init() - Build the registers information table
+ *  See ipahal_reg_objs[][] comments
+ *
+ * Note: As global variables are initialized to zero, any un-overridden
+ *  register entry will be zero. This is how we recognize them.
+ */
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_reg_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		for (j = 0; j < IPA_REG_MAX ; j++) {
+			if (!memcmp(&ipahal_reg_objs[i+1][j], &zero_obj,
+				sizeof(struct ipahal_reg_obj))) {
+				memcpy(&ipahal_reg_objs[i+1][j],
+					&ipahal_reg_objs[i][j],
+					sizeof(struct ipahal_reg_obj));
+			} else {
+				/*
+				 * explicitly overridden register.
+				 * Check validity
+				 */
+				if (!ipahal_reg_objs[i+1][j].offset) {
+					IPAHAL_ERR(
+					  "reg=%s with zero offset ipa_ver=%d\n",
+					  ipahal_reg_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_reg_objs[i+1][j].construct) {
+					IPAHAL_ERR(
+					  "reg=%s with NULL construct func ipa_ver=%d\n",
+					  ipahal_reg_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_reg_objs[i+1][j].parse) {
+					IPAHAL_ERR(
+					  "reg=%s with NULL parse func ipa_ver=%d\n",
+					  ipahal_reg_name_str(j), i+1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_reg_name_str() - returns string that represent the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name)
+{
+	if (reg_name < 0 || reg_name >= IPA_REG_MAX) {
+		IPAHAL_ERR("requested name of invalid reg=%d\n", reg_name);
+		return "Invalid Register";
+	}
+
+	return ipareg_name_to_str[reg_name];
+}
+
+/*
+ * ipahal_read_reg_n() - Get n parameterized reg value
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("read from %s n=%u\n",
+		ipahal_reg_name_str(reg), n);
+
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Read access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return -EFAULT;
+	}
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	return ioread32(ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("write to %s m=%u n=%u val=%u\n",
+		ipahal_reg_name_str(reg), m, n, val);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Write access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+	/*
+	 * Currently there is one register with m and n parameters:
+	 *	IPA_UC_MAILBOX_m_n, whose m stride is 0x80.
+	 * If more such registers are added in the future,
+	 *	we can move the m parameter to the table above.
+	 */
+	offset += 0x80 * m;
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	iowrite32(val, ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields)
+{
+	u32 val = 0;
+	u32 offset;
+
+	if (!fields) {
+		IPAHAL_ERR("Input error fields=%p\n", fields);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("read from %s n=%u and parse it\n",
+		ipahal_reg_name_str(reg), n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Read access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return -EFAULT;
+	}
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	val = ioread32(ipahal_ctx->base + offset);
+	ipahal_reg_objs[ipahal_ctx->hw_type][reg].parse(reg, fields, val);
+
+	return val;
+}
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+		const void *fields)
+{
+	u32 val = 0;
+	u32 offset;
+
+	if (!fields) {
+		IPAHAL_ERR("Input error fields=%p\n", fields);
+		return;
+	}
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("write to %s n=%u after constructing it\n",
+		ipahal_reg_name_str(reg), n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Write access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	ipahal_reg_objs[ipahal_ctx->hw_type][reg].construct(reg, fields, &val);
+
+	iowrite32(val, ipahal_ctx->base + offset);
+}
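+
+/*
+ * Illustrative usage sketch (assumption, not taken from this patch): a HAL
+ * client reads or writes a register through the matching ipahal_reg_*
+ * fields structure, e.g. for IPA_SHARED_MEM_SIZE:
+ *
+ *	struct ipahal_reg_shared_mem_size smem;
+ *
+ *	ipahal_read_reg_n_fields(IPA_SHARED_MEM_SIZE, 0, &smem);
+ *
+ * After the call, smem.shared_mem_baddr and smem.shared_mem_sz hold the
+ * parsed SW partition offset and size (both in 8-byte units). The per-HW
+ * parse/construct callbacks do the shift/mask work, so callers never touch
+ * the raw bit-field macros.
+ */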
+
+/*
+ * Get the offset of a m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("get offset of %s m=%u n=%u\n",
+		ipahal_reg_name_str(reg), m, n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return -EFAULT;
+	}
+	/*
+	 * Currently there is one register with m and n parameters
+	 *	IPA_UC_MAILBOX_m_n. The m value of it is 0x80.
+	 * If more such registers will be added in the future,
+	 *	we can move the m parameter to the table above.
+	 */
+	offset +=  0x80 * m;
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+
+	return offset;
+}
+
+u32 ipahal_get_reg_base(void)
+{
+	return 0x00040000;
+}
+
+
+/*
+ * Specific functions
+ * These functions supply specific register values for specific operations
+ *  that cannot be handled by the generic functions.
+ * E.g. to disable aggregation, specific bits of the AGGR register must be
+ *  written while the other bits are left untouched. Such an operation is
+ *  very specific and cannot be defined generically, so we define these
+ *  specific functions for it.
+ */
+
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask)
+{
+	if (!valmask) {
+		IPAHAL_ERR("Input error\n");
+		return;
+	}
+
+	valmask->val = (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+	valmask->mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+
+	valmask->val |= ((0 & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT);
+	valmask->mask |= ((IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT));
+}
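+
+/*
+ * Illustrative usage sketch (assumption, not taken from this patch): the
+ * valmask pair is meant to be applied as a masked update of the AGGR
+ * register, i.e. only the bits set in ->mask are modified:
+ *
+ *	struct ipahal_reg_valmask valmask;
+ *	u32 aggr;
+ *
+ *	ipahal_get_disable_aggr_valmask(&valmask);
+ *	aggr = ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, ep_idx);
+ *	aggr = (aggr & ~valmask.mask) | (valmask.val & valmask.mask);
+ *	ipahal_write_reg_n(IPA_ENDP_INIT_AGGR_n, ep_idx, aggr);
+ *
+ * ep_idx is a hypothetical endpoint index; the same valmask may instead
+ * be handed to a masked register-write immediate command.
+ */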
+
+u32 ipahal_aggr_get_max_byte_limit(void)
+{
+	return
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT;
+}
+
+u32 ipahal_aggr_get_max_pkt_limit(void)
+{
+	return
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT;
+}
+
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+	struct ipahal_reg_valmask *valmask)
+{
+	u32 shft;
+	u32 bmsk;
+
+	if (!valmask) {
+		IPAHAL_ERR("Input error\n");
+		return;
+	}
+
+	if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+		shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
+		bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
+	} else {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
+	}
+
+	IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
+	valmask->mask = bmsk << shft;
+}
+
+void ipahal_get_fltrt_hash_flush_valmask(
+	struct ipahal_reg_fltrt_hash_flush *flush,
+	struct ipahal_reg_valmask *valmask)
+{
+	if (!flush || !valmask) {
+		IPAHAL_ERR("Input error: flush=%p ; valmask=%p\n",
+			flush, valmask);
+		return;
+	}
+
+	memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
+	if (flush->v6_rt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT);
+	if (flush->v6_flt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT);
+	if (flush->v4_rt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT);
+	if (flush->v4_flt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT);
+
+	valmask->mask = valmask->val;
+}
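+
+/*
+ * Illustrative usage sketch (assumption, not taken from this patch):
+ * flushing the IPv4 hashable caches could look like
+ *
+ *	struct ipahal_reg_fltrt_hash_flush flush = { .v4_flt = true,
+ *						     .v4_rt = true };
+ *	struct ipahal_reg_valmask valmask;
+ *
+ *	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+ *	ipahal_write_reg(IPA_FILT_ROUT_HASH_FLUSH, valmask.val);
+ *
+ * Since mask equals val for this register, the unset bits are simply
+ * written as zero and no read-modify-write is required.
+ */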
+
+void ipahal_get_status_ep_valmask(int pipe_num,
+	struct ipahal_reg_valmask *valmask)
+{
+	if (!valmask) {
+		IPAHAL_ERR("Input error\n");
+		return;
+	}
+
+	valmask->val =
+		(pipe_num & IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
+		IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+
+	valmask->mask =
+		IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
+		IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
new file mode 100644
index 0000000..8fb9040
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -0,0 +1,449 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_H_
+#define _IPAHAL_REG_H_
+
+#include <linux/ipa.h>
+
+/*
+ * Registers names
+ *
+ * NOTE:: Any change to this enum, need to change to ipareg_name_to_str
+ *	array as well.
+ */
+enum ipahal_reg_name {
+	IPA_ROUTE,
+	IPA_IRQ_STTS_EE_n,
+	IPA_IRQ_EN_EE_n,
+	IPA_IRQ_CLR_EE_n,
+	IPA_IRQ_SUSPEND_INFO_EE_n,
+	IPA_SUSPEND_IRQ_EN_EE_n,
+	IPA_SUSPEND_IRQ_CLR_EE_n,
+	IPA_BCR,
+	IPA_ENABLED_PIPES,
+	IPA_COMP_SW_RESET,
+	IPA_VERSION,
+	IPA_TAG_TIMER,
+	IPA_COMP_HW_VERSION,
+	IPA_SPARE_REG_1,
+	IPA_SPARE_REG_2,
+	IPA_COMP_CFG,
+	IPA_STATE_AGGR_ACTIVE,
+	IPA_ENDP_INIT_HDR_n,
+	IPA_ENDP_INIT_HDR_EXT_n,
+	IPA_ENDP_INIT_AGGR_n,
+	IPA_AGGR_FORCE_CLOSE,
+	IPA_ENDP_INIT_ROUTE_n,
+	IPA_ENDP_INIT_MODE_n,
+	IPA_ENDP_INIT_NAT_n,
+	IPA_ENDP_INIT_CTRL_n,
+	IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+	IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+	IPA_ENDP_INIT_DEAGGR_n,
+	IPA_ENDP_INIT_SEQ_n,
+	IPA_DEBUG_CNT_REG_n,
+	IPA_ENDP_INIT_CFG_n,
+	IPA_IRQ_EE_UC_n,
+	IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+	IPA_ENDP_INIT_HDR_METADATA_n,
+	IPA_ENDP_INIT_RSRC_GRP_n,
+	IPA_SHARED_MEM_SIZE,
+	IPA_SRAM_DIRECT_ACCESS_n,
+	IPA_DEBUG_CNT_CTRL_n,
+	IPA_UC_MAILBOX_m_n,
+	IPA_FILT_ROUT_HASH_FLUSH,
+	IPA_SINGLE_NDP_MODE,
+	IPA_QCNCM,
+	IPA_SYS_PKT_PROC_CNTXT_BASE,
+	IPA_LOCAL_PKT_PROC_CNTXT_BASE,
+	IPA_ENDP_STATUS_n,
+	IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+	IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+	IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+	IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+	IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_67_RSRC_TYPE_n,
+	IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
+	IPA_RX_HPS_CLIENTS_MIN_DEPTH_1,
+	IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+	IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+	IPA_QSB_MAX_WRITES,
+	IPA_QSB_MAX_READS,
+	IPA_TX_CFG,
+	IPA_REG_MAX,
+};
+
+/*
+ * struct ipahal_reg_route - IPA route register
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ * @route_frag_def_pipe: Default pipe to route fragmented exception
+ *    packets and frag new rule statuses, if the source pipe does not have
+ *    a notification status pipe defined.
+ * @route_def_retain_hdr: default value of retain header. It is used
+ *    when no rule was hit
+ */
+struct ipahal_reg_route {
+	u32 route_dis;
+	u32 route_def_pipe;
+	u32 route_def_hdr_table;
+	u32 route_def_hdr_ofst;
+	u8  route_frag_def_pipe;
+	u32 route_def_retain_hdr;
+};
+
+/*
+ * struct ipahal_reg_endp_init_route - IPA ENDP_INIT_ROUTE_n register
+ * @route_table_index: Default index of routing table (IPA Consumer).
+ */
+struct ipahal_reg_endp_init_route {
+	u32 route_table_index;
+};
+
+/*
+ * struct ipahal_reg_endp_init_rsrc_grp - IPA_ENDP_INIT_RSRC_GRP_n register
+ * @rsrc_grp: Index of group for this ENDP. If this ENDP is a source-ENDP,
+ *	index is for source-resource-group. If destination ENDP, index is
+ *	for destination-resource-group.
+ */
+struct ipahal_reg_endp_init_rsrc_grp {
+	u32 rsrc_grp;
+};
+
+/*
+ * struct ipahal_reg_endp_init_mode - IPA ENDP_INIT_MODE_n register
+ * @dst_pipe_number: This parameter specifies the destination output pipe
+ *	that packets will be routed to. Valid for DMA mode only and for Input
+ *	Pipes only (IPA Consumer)
+ * @ep_mode: endpoint mode configuration
+ */
+struct ipahal_reg_endp_init_mode {
+	u32 dst_pipe_number;
+	struct ipa_ep_cfg_mode ep_mode;
+};
+
+/*
+ * struct ipahal_reg_shared_mem_size - IPA SHARED_MEM_SIZE register
+ * @shared_mem_sz: Available size [in 8Bytes] of SW partition within
+ *	IPA shared memory.
+ * @shared_mem_baddr: Offset of SW partition within IPA
+ *	shared memory[in 8Bytes]. To get absolute address of SW partition,
+ *	add this offset to IPA_SRAM_DIRECT_ACCESS_n baddr.
+ */
+struct ipahal_reg_shared_mem_size {
+	u32 shared_mem_sz;
+	u32 shared_mem_baddr;
+};
+
+/*
+ * struct ipahal_reg_ep_cfg_status - status configuration in IPA end-point
+ * @status_en: Determines if end point supports Status Indications. SW should
+ *	set this bit in order to enable Statuses. Output Pipe - send
+ *	Status indications only if bit is set. Input Pipe - forward Status
+ *	indication to STATUS_ENDP only if bit is set. Valid for Input
+ *	and Output Pipes (IPA Consumer and Producer)
+ * @status_ep: Statuses generated for this endpoint will be forwarded to the
+ *	specified Status End Point. Status endpoint needs to be
+ *	configured with STATUS_EN=1 Valid only for Input Pipes (IPA
+ *	Consumer)
+ * @status_location: Location of PKT-STATUS on destination pipe.
+ *	If set to 0 (default), PKT-STATUS will be appended before the packet
+ *	for this endpoint. If set to 1, PKT-STATUS will be appended after the
+ *	packet for this endpoint. Valid only for Output Pipes (IPA Producer)
+ */
+struct ipahal_reg_ep_cfg_status {
+	bool status_en;
+	u8 status_ep;
+	bool status_location;
+};
+
+/*
+ * struct ipahal_reg_hash_tuple - Hash tuple members for flt and rt
+ *  each field tells whether it is to be masked or not
+ * @src_id: pipe number for flt, table index for rt
+ * @src_ip_addr: IP source address
+ * @dst_ip_addr: IP destination address
+ * @src_port: L4 source port
+ * @dst_port: L4 destination port
+ * @protocol: IP protocol field
+ * @meta_data: packet meta-data
+ *
+ */
+struct ipahal_reg_hash_tuple {
+	/* src_id: pipe in flt, tbl index in rt */
+	bool src_id;
+	bool src_ip_addr;
+	bool dst_ip_addr;
+	bool src_port;
+	bool dst_port;
+	bool protocol;
+	bool meta_data;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_tuple - IPA hash tuple register
+ * @flt: Hash tuple info for filtering
+ * @rt: Hash tuple info for routing
+ * @undefinedX: Undefined/unused bit fields of the register
+ */
+struct ipahal_reg_fltrt_hash_tuple {
+	struct ipahal_reg_hash_tuple flt;
+	struct ipahal_reg_hash_tuple rt;
+	u32 undefined1;
+	u32 undefined2;
+};
+
+/*
+ * enum ipahal_reg_dbg_cnt_type - Debug Counter Type
+ * DBG_CNT_TYPE_IPV4_FLTR - Count IPv4 filtering rules
+ * DBG_CNT_TYPE_IPV4_ROUT - Count IPv4 routing rules
+ * DBG_CNT_TYPE_GENERAL - General counter
+ * DBG_CNT_TYPE_IPV6_FLTR - Count IPv6 filtering rules
+ * DBG_CNT_TYPE_IPV6_ROUT - Count IPv6 routing rules
+ */
+enum ipahal_reg_dbg_cnt_type {
+	DBG_CNT_TYPE_IPV4_FLTR,
+	DBG_CNT_TYPE_IPV4_ROUT,
+	DBG_CNT_TYPE_GENERAL,
+	DBG_CNT_TYPE_IPV6_FLTR,
+	DBG_CNT_TYPE_IPV6_ROUT,
+};
+
+/*
+ * struct ipahal_reg_debug_cnt_ctrl - IPA_DEBUG_CNT_CTRL_n register
+ * @en - Enable debug counter
+ * @type - Type of debug counting
+ * @product - False -> count bytes. True -> count packets
+ * @src_pipe - Specific Pipe to match. If FF, no need to match
+ *	specific pipe
+ * @rule_idx_pipe_rule - Global Rule or Pipe Rule. If pipe, then indicated by
+ *	src_pipe. Starting with IPA v3.5, Global Rule is not
+ *	supported and this field is ignored.
+ * @rule_idx - Rule index. Irrelevant for type General
+ */
+struct ipahal_reg_debug_cnt_ctrl {
+	bool en;
+	enum ipahal_reg_dbg_cnt_type type;
+	bool product;
+	u8 src_pipe;
+	bool rule_idx_pipe_rule;
+	u16 rule_idx;
+};
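+
+/*
+ * Illustrative usage sketch (assumption, not taken from this patch):
+ * enabling debug counter 0 as a general packet counter could look like
+ *
+ *	struct ipahal_reg_debug_cnt_ctrl dbg = { 0 };
+ *
+ *	dbg.en = true;
+ *	dbg.type = DBG_CNT_TYPE_GENERAL;
+ *	dbg.product = true;	(count packets rather than bytes)
+ *	dbg.src_pipe = 0xFF;	(do not match a specific pipe)
+ *	ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg);
+ */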
+
+/*
+ * struct ipahal_reg_rsrc_grp_cfg - Min/Max values for two rsrc groups
+ * @x_min - first group min value
+ * @x_max - first group max value
+ * @y_min - second group min value
+ * @y_max - second group max value
+ */
+struct ipahal_reg_rsrc_grp_cfg {
+	u32 x_min;
+	u32 x_max;
+	u32 y_min;
+	u32 y_max;
+};
+
+/*
+ * struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients
+ * @client_minmax - Min or Max values. In case of depth 0 the 4 values
+ *	are used. In case of depth 1, only the first 2 values are used
+ */
+struct ipahal_reg_rx_hps_clients {
+	u32 client_minmax[4];
+};
+
+/*
+ * struct ipahal_reg_valmask - holds the value and mask for a register
+ *	A HAL client may require only the value and mask of certain
+ *	register fields.
+ * @val - The value
+ * @mask - The mask of the value
+ */
+struct ipahal_reg_valmask {
+	u32 val;
+	u32 mask;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_flush - Flt/Rt flush configuration
+ * @v6_rt - Flush IPv6 Routing cache
+ * @v6_flt - Flush IPv6 Filtering cache
+ * @v4_rt - Flush IPv4 Routing cache
+ * @v4_flt - Flush IPv4 Filtering cache
+ */
+struct ipahal_reg_fltrt_hash_flush {
+	bool v6_rt;
+	bool v6_flt;
+	bool v4_rt;
+	bool v4_flt;
+};
+
+/*
+ * struct ipahal_reg_single_ndp_mode - IPA SINGLE_NDP_MODE register
+ * @single_ndp_en: When set to '1', IPA builds MBIM frames with up to 1
+ *	NDP-header.
+ * @undefined: undefined bits of the register
+ */
+struct ipahal_reg_single_ndp_mode {
+	bool single_ndp_en;
+	u32 undefined;
+};
+
+/*
+ * struct ipahal_reg_qcncm - IPA QCNCM register
+ * @mode_en: When QCNCM_MODE_EN=1, IPA will use QCNCM signature.
+ * @mode_val: Used only when QCNCM_MODE_EN=1 and sets SW Signature in
+ *	the NDP header.
+ * @undefined: undefined bits of the register
+ */
+struct ipahal_reg_qcncm {
+	bool mode_en;
+	u32 mode_val;
+	u32 undefined;
+};
+
+/*
+ * struct ipahal_reg_tx_cfg - IPA TX_CFG register
+ * @tx0_prefetch_disable: Disable prefetch on TX0
+ * @tx1_prefetch_disable: Disable prefetch on TX1
+ * @prefetch_almost_empty_size: Prefetch almost empty size
+ */
+struct ipahal_reg_tx_cfg {
+	bool tx0_prefetch_disable;
+	bool tx1_prefetch_disable;
+	u16 prefetch_almost_empty_size;
+};
+
+/*
+ * ipahal_reg_name_str() - returns the string that represents the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name);
+
+/*
+ * ipahal_read_reg_n() - Get the raw value of n parameterized reg
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n);
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val);
+
+/*
+ * ipahal_write_reg_n() - Write to n parameterized reg a raw value
+ */
+static inline void ipahal_write_reg_n(enum ipahal_reg_name reg,
+	u32 n, u32 val)
+{
+	ipahal_write_reg_mn(reg, 0, n, val);
+}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields);
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+	const void *fields);
+
+/*
+ * ipahal_read_reg() - Get the raw value of a reg
+ */
+static inline u32 ipahal_read_reg(enum ipahal_reg_name reg)
+{
+	return ipahal_read_reg_n(reg, 0);
+}
+
+/*
+ * ipahal_write_reg() - Write to reg a raw value
+ */
+static inline void ipahal_write_reg(enum ipahal_reg_name reg,
+	u32 val)
+{
+	ipahal_write_reg_mn(reg, 0, 0, val);
+}
+
+/*
+ * ipahal_read_reg_fields() - Get the parsed value of a reg
+ */
+static inline u32 ipahal_read_reg_fields(enum ipahal_reg_name reg, void *fields)
+{
+	return ipahal_read_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * ipahal_write_reg_fields() - Write to reg a parsed value
+ */
+static inline void ipahal_write_reg_fields(enum ipahal_reg_name reg,
+	const void *fields)
+{
+	ipahal_write_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * Get the offset of a m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n);
+
+/*
+ * Get the offset of a n parameterized register
+ */
+static inline u32 ipahal_get_reg_n_ofst(enum ipahal_reg_name reg, u32 n)
+{
+	return ipahal_get_reg_mn_ofst(reg, 0, n);
+}
+
+/*
+ * Get the offset of a register
+ */
+static inline u32 ipahal_get_reg_ofst(enum ipahal_reg_name reg)
+{
+	return ipahal_get_reg_mn_ofst(reg, 0, 0);
+}
+
+/*
+ * Get the register base address
+ */
+u32 ipahal_get_reg_base(void);
+
+/*
+ * Specific functions
+ * These functions supply specific register values for specific operations
+ *  that cannot be handled by the generic functions.
+ * E.g. to disable aggregation, specific bits of the AGGR register must be
+ *  written while the other bits are left untouched. Such an operation is
+ *  very specific and cannot be defined generically, so we define these
+ *  specific functions for it.
+ */
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask);
+u32 ipahal_aggr_get_max_byte_limit(void);
+u32 ipahal_aggr_get_max_pkt_limit(void);
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+	struct ipahal_reg_valmask *valmask);
+void ipahal_get_fltrt_hash_flush_valmask(
+	struct ipahal_reg_fltrt_hash_flush *flush,
+	struct ipahal_reg_valmask *valmask);
+void ipahal_get_status_ep_valmask(int pipe_num,
+	struct ipahal_reg_valmask *valmask);
+
+#endif /* _IPAHAL_REG_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
new file mode 100644
index 0000000..1606a2f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -0,0 +1,315 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_I_H_
+#define _IPAHAL_REG_I_H_
+
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
+
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
+			(reg |= ((val) << (shift)) & (mask))
+#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
+		(((reg) & (mask)) >> (shift))
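+
+/*
+ * Illustrative sketch (not part of the original code): these helpers pack
+ * and unpack register fields using the *_SHFT/*_BMSK pairs defined below,
+ * e.g. with the IPA_ROUTE default-pipe field:
+ *
+ *	u32 reg = 0;
+ *	u32 pipe;
+ *
+ *	IPA_SETFIELD_IN_REG(reg, 5, IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ *		IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+ *	pipe = IPA_GETFIELD_FROM_REG(reg, IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ *		IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);		-> pipe == 5
+ *
+ * Note that the value is masked after shifting, so the BMSK constants are
+ * expressed in register (already-shifted) bit positions.
+ */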
+
+
+/* IPA_ROUTE register */
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0X40
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK  0x1000000
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
+
+/* IPA_ENDP_INIT_HDR_n register */
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
+
+/* IPA_ENDP_INIT_HDR_EXT_n register */
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00
+
+/* IPA_ENDP_INIT_AGGR_N register */
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK	0x1000000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT	0x18
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x400000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK	0x200000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT	0x15
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+/* IPA_AGGR_FORCE_CLOSE register */
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0
+
+/* IPA_ENDP_INIT_ROUTE_n register */
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+/* IPA_ENDP_INIT_MODE_n register */
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_BMSK 0x40000000
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_SHFT 0x1e
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK 0x10000000
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT 0x1c
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+/* IPA_ENDP_INIT_NAT_n register */
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_CTRL_n register */
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
+
+/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HOL_BLOCK_TIMER_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0
+
+/* IPA_ENDP_INIT_DEAGGR_n register */
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK  0x80
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+
+/* IPA_ENDP_INIT_SEQ_n register */
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
+
+/* IPA_DEBUG_CNT_REG_n register */
+#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_MAX 15
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
+
+/* IPA_ENDP_INIT_CFG_n register */
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK 0x100
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT 0x8
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_MASK_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
+
+/* IPA_ENDP_INIT_RSRC_GRP_n register */
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5 0x3
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5 0
+
+/* IPA_SHARED_MEM_SIZE register */
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT 0x10
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK  0xffff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT  0x0
+
+/* IPA_DEBUG_CNT_CTRL_n register */
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 0x10000000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5 0x1ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0
+
+/* IPA_FILT_ROUT_HASH_FLUSH register */
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT 12
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT 8
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT 4
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT 0
+
+/* IPA_SINGLE_NDP_MODE register */
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK 0xfffffffe
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT 0
+
+/* IPA_QCNCM register */
+#define IPA_QCNCM_MODE_UNDEFINED2_BMSK 0xf0000000
+#define IPA_QCNCM_MODE_UNDEFINED2_SHFT 0x1c
+#define IPA_QCNCM_MODE_VAL_BMSK 0xffffff0
+#define IPA_QCNCM_MODE_VAL_SHFT 0x4
+#define IPA_QCNCM_UNDEFINED1_BMSK 0xe
+#define IPA_QCNCM_UNDEFINED1_SHFT 0x1
+#define IPA_QCNCM_MODE_EN_BMSK 0x1
+#define IPA_QCNCM_MODE_EN_SHFT 0
+
+/* IPA_ENDP_STATUS_n register */
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+
+/* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT 7
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK 0xff80
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT 23
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK 0xff800000
+
+/* IPA_RSRC_GRP_XY_RSRC_TYPE_n register */
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5 0x3F000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5 0x3F0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5 0x3F00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0
+
+
+/* IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \
+						(0xF << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
+
+/* IPA_QSB_MAX_WRITES register */
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK (0xf)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT (0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK (0xf0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT (4)
+
+/* IPA_QSB_MAX_READS register */
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK (0xf)
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT (0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4)
+
+/* IPA_TX_CFG register */
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1)
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5 (0x2)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5 (1)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2)
+
+#endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
new file mode 100644
index 0000000..56ec538
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -0,0 +1,2960 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * WWAN Transport Network Driver.
+ */
+
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/pkt_sched.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include "ipa_qmi_service.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include <linux/ipa.h>
+#include <uapi/linux/net_map.h>
+
+#include "ipa_trace.h"
+
+#define WWAN_METADATA_SHFT 24
+#define WWAN_METADATA_MASK 0xFF000000
+#define WWAN_DATA_LEN 2000
+#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
+#define HEADROOM_FOR_QMAP   8 /* for mux header */
+#define TAILROOM            0 /* for padding by mux layer */
+#define MAX_NUM_OF_MUX_CHANNEL  10 /* max mux channels */
+#define UL_FILTER_RULE_HANDLE_START 69
+#define DEFAULT_OUTSTANDING_HIGH_CTL 96
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+
+#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+
+#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
+
+#define INVALID_MUX_ID 0xFF
+#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
+#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
+#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
+#define NAPI_WEIGHT 60
+
+#define IPA_NETDEV() \
+	((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
+	  rmnet_ipa3_ctx->wwan_priv->net : NULL)
+
+
+static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
+static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
+static void ipa3_wwan_msg_free_cb(void*, u32, u32);
+static void ipa3_rmnet_rx_cb(void *priv);
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget);
+
+static void ipa3_wake_tx_queue(struct work_struct *work);
+static DECLARE_WORK(ipa3_tx_wakequeue_work, ipa3_wake_tx_queue);
+
+static void tethering_stats_poll_queue(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
+			    tethering_stats_poll_queue);
+
+enum ipa3_wwan_device_status {
+	WWAN_DEVICE_INACTIVE = 0,
+	WWAN_DEVICE_ACTIVE   = 1
+};
+
+struct ipa3_rmnet_plat_drv_res {
+	bool ipa_rmnet_ssr;
+	bool ipa_loaduC;
+	bool ipa_advertise_sg_support;
+	bool ipa_napi_enable;
+};
+
+/**
+ * struct ipa3_wwan_private - WWAN private data
+ * @net: network interface struct implemented by this driver
+ * @stats: iface statistics
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high: number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets below which the TX
+ *	queue is woken up again
+ * @ch_id: channel id
+ * @lock: spinlock for mutual exclusion
+ * @device_status: holds device status
+ *
+ * WWAN private - holds all relevant info about WWAN driver
+ */
+struct ipa3_wwan_private {
+	struct net_device *net;
+	struct net_device_stats stats;
+	atomic_t outstanding_pkts;
+	int outstanding_high_ctl;
+	int outstanding_high;
+	int outstanding_low;
+	uint32_t ch_id;
+	spinlock_t lock;
+	struct completion resource_granted_completion;
+	enum ipa3_wwan_device_status device_status;
+	struct napi_struct napi;
+};
+
+struct rmnet_ipa3_context {
+	struct ipa3_wwan_private *wwan_priv;
+	struct ipa_sys_connect_params apps_to_ipa_ep_cfg;
+	struct ipa_sys_connect_params ipa_to_apps_ep_cfg;
+	u32 qmap_hdr_hdl;
+	u32 dflt_v4_wan_rt_hdl;
+	u32 dflt_v6_wan_rt_hdl;
+	struct ipa3_rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
+	int num_q6_rules;
+	int old_num_q6_rules;
+	int rmnet_index;
+	bool egress_set;
+	bool a7_ul_flt_set;
+	struct workqueue_struct *rm_q6_wq;
+	atomic_t is_initialized;
+	atomic_t is_ssr;
+	void *subsys_notify_handle;
+	u32 apps_to_ipa3_hdl;
+	u32 ipa3_to_apps_hdl;
+	struct mutex ipa_to_apps_pipe_handle_guard;
+};
+
+static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
+static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res;
+
+/**
+ * ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the header
+ */
+static int ipa3_setup_a7_qmap_hdr(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	u32 pyld_sz;
+	int ret;
+
+	/* install the basic exception header */
+	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!hdr) {
+		IPAWANERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
+				IPA_RESOURCE_NAME_MAX);
+	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+
+	if (ipa3_add_hdr(hdr)) {
+		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+	rmnet_ipa3_ctx->qmap_hdr_hdl = hdr_entry->hdr_hdl;
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+static void ipa3_del_a7_qmap_hdr(void)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *hdl_entry;
+	u32 pyld_sz;
+	int ret;
+
+	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+		      sizeof(struct ipa_hdr_del);
+	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!del_hdr) {
+		IPAWANERR("fail to alloc exception hdr_del\n");
+		return;
+	}
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 1;
+	hdl_entry = &del_hdr->hdl[0];
+	hdl_entry->hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
+
+	ret = ipa3_del_hdr(del_hdr);
+	if (ret || hdl_entry->status)
+		IPAWANERR("ipa3_del_hdr failed\n");
+	else
+		IPAWANDBG("hdrs deletion done\n");
+
+	rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
+	kfree(del_hdr);
+}
+
+static void ipa3_del_qmap_hdr(uint32_t hdr_hdl)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *hdl_entry;
+	u32 pyld_sz;
+	int ret;
+
+	if (hdr_hdl == 0) {
+		IPAWANERR("Invalid hdr_hdl provided\n");
+		return;
+	}
+
+	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+		sizeof(struct ipa_hdr_del);
+	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!del_hdr) {
+		IPAWANERR("fail to alloc exception hdr_del\n");
+		return;
+	}
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 1;
+	hdl_entry = &del_hdr->hdl[0];
+	hdl_entry->hdl = hdr_hdl;
+
+	ret = ipa3_del_hdr(del_hdr);
+	if (ret || hdl_entry->status)
+		IPAWANERR("ipa3_del_hdr failed\n");
+	else
+		IPAWANDBG("header deletion done\n");
+
+	rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
+	kfree(del_hdr);
+}
+
+static void ipa3_del_mux_qmap_hdrs(void)
+{
+	int index;
+
+	for (index = 0; index < rmnet_ipa3_ctx->rmnet_index; index++) {
+		ipa3_del_qmap_hdr(rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
+		rmnet_ipa3_ctx->mux_channel[index].hdr_hdl = 0;
+	}
+}
+
+static int ipa3_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	char hdr_name[IPA_RESOURCE_NAME_MAX];
+	u32 pyld_sz;
+	int ret;
+
+	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!hdr) {
+		IPAWANERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 mux_id);
+	strlcpy(hdr_entry->name, hdr_name,
+				IPA_RESOURCE_NAME_MAX);
+
+	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+	hdr_entry->hdr[1] = (uint8_t) mux_id;
+	IPAWANDBG("header (%s) with mux-id: (%d)\n",
+		hdr_name,
+		hdr_entry->hdr[1]);
+	if (ipa3_add_hdr(hdr)) {
+		IPAWANERR("fail to add IPA_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAWANERR("fail to add IPA_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+	*hdr_hdl = hdr_entry->hdr_hdl;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+/**
+ * ipa3_setup_dflt_wan_rt_tables() - Setup default wan routing tables
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
+static int ipa3_setup_dflt_wan_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+
+	rt_rule =
+	   kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule) {
+		IPAWANERR("fail to alloc mem\n");
+		return -ENOMEM;
+	}
+	/* setup a default v4 route to point to Apps */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
+
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAWANERR("fail to add dflt_wan v4 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+
+	IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to A5 */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAWANERR("fail to add dflt_wan v6 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+	kfree(rt_rule);
+	return 0;
+}
+
+static void ipa3_del_dflt_wan_rt_tables(void)
+{
+	struct ipa_ioc_del_rt_rule *rt_rule;
+	struct ipa_rt_rule_del *rt_rule_entry;
+	int len;
+
+	len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_del);
+	rt_rule = kzalloc(len, GFP_KERNEL);
+	if (!rt_rule) {
+		IPAWANERR("unable to allocate memory for del route rule\n");
+		return;
+	}
+
+	memset(rt_rule, 0, len);
+	rt_rule->commit = 1;
+	rt_rule->num_hdls = 1;
+	rt_rule->ip = IPA_IP_v4;
+
+	rt_rule_entry = &rt_rule->hdl[0];
+	rt_rule_entry->status = -1;
+	rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl;
+
+	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+		rt_rule_entry->hdl, IPA_IP_v4);
+	if (ipa3_del_rt_rule(rt_rule) ||
+			(rt_rule_entry->status)) {
+		IPAWANERR("Routing rule deletion failed!\n");
+	}
+
+	rt_rule->ip = IPA_IP_v6;
+	rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl;
+	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+		rt_rule_entry->hdl, IPA_IP_v6);
+	if (ipa3_del_rt_rule(rt_rule) ||
+			(rt_rule_entry->status)) {
+		IPAWANERR("Routing rule deletion failed!\n");
+	}
+
+	kfree(rt_rule);
+}
+
+int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+		*rule_req)
+{
+	int i, j;
+
+	if (rule_req->filter_spec_ex_list_valid == true) {
+		rmnet_ipa3_ctx->num_q6_rules =
+			rule_req->filter_spec_ex_list_len;
+		IPAWANDBG("Received (%d) install_flt_req\n",
+			rmnet_ipa3_ctx->num_q6_rules);
+	} else {
+		rmnet_ipa3_ctx->num_q6_rules = 0;
+		IPAWANERR("got no UL rules from modem\n");
+		return -EINVAL;
+	}
+
+	/* copy UL filter rules from Modem*/
+	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+		/* check if rules overflow the cache */
+		if (i == MAX_NUM_Q6_RULE) {
+			IPAWANERR("Reaching (%d) max cache ",
+				MAX_NUM_Q6_RULE);
+			IPAWANERR(" however total (%d)\n",
+				rmnet_ipa3_ctx->num_q6_rules);
+			goto failure;
+		}
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].ip =
+			rule_req->filter_spec_ex_list[i].ip_type;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].action =
+			rule_req->filter_spec_ex_list[i].filter_action;
+		if (rule_req->filter_spec_ex_list[i].
+			is_routing_table_index_valid == true)
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
+			rule_req->filter_spec_ex_list[i].route_table_index;
+		if (rule_req->filter_spec_ex_list[i].is_mux_id_valid == true)
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].mux_id =
+			rule_req->filter_spec_ex_list[i].mux_id;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id =
+			rule_req->filter_spec_ex_list[i].rule_id;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable =
+			rule_req->filter_spec_ex_list[i].is_rule_hashable;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			rule_eq_bitmap;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			tos_eq_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
+			rule_req->filter_spec_ex_list[i].filter_rule.tos_eq;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			protocol_eq_present = rule_req->filter_spec_ex_list[i].
+			filter_rule.protocol_eq_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			protocol_eq;
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			num_ihl_offset_range_16 =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.num_ihl_offset_range_16;
+		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			num_ihl_offset_range_16; j++) {
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ihl_offset_range_16[j].offset = rule_req->
+			filter_spec_ex_list[i].filter_rule.
+			ihl_offset_range_16[j].offset;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ihl_offset_range_16[j].range_low = rule_req->
+			filter_spec_ex_list[i].filter_rule.
+			ihl_offset_range_16[j].range_low;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ihl_offset_range_16[j].range_high = rule_req->
+			filter_spec_ex_list[i].filter_rule.
+			ihl_offset_range_16[j].range_high;
+		}
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			num_offset_meq_32;
+		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				num_offset_meq_32; j++) {
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			offset_meq_32[j].offset =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.offset_meq_32[j].offset;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			offset_meq_32[j].mask =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.offset_meq_32[j].mask;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			offset_meq_32[j].value =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.offset_meq_32[j].value;
+		}
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.tc_eq_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
+			rule_req->filter_spec_ex_list[i].filter_rule.tc_eq;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			flow_eq_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
+			rule_req->filter_spec_ex_list[i].filter_rule.flow_eq;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_16_present = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_16_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_16.offset = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_16.offset;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_16.value = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_16.value;
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_32_present = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_32_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_32.offset = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_32.offset;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_32.value = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_32.value;
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		num_ihl_offset_meq_32 = rule_req->filter_spec_ex_list[i].
+		filter_rule.num_ihl_offset_meq_32;
+		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].
+			eq_attrib.num_ihl_offset_meq_32; j++) {
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				ihl_offset_meq_32[j].offset = rule_req->
+				filter_spec_ex_list[i].filter_rule.
+				ihl_offset_meq_32[j].offset;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				ihl_offset_meq_32[j].mask = rule_req->
+				filter_spec_ex_list[i].filter_rule.
+				ihl_offset_meq_32[j].mask;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				ihl_offset_meq_32[j].value = rule_req->
+				filter_spec_ex_list[i].filter_rule.
+				ihl_offset_meq_32[j].value;
+		}
+		ipa3_qmi_ctx->
+			q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			num_offset_meq_128;
+		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			num_offset_meq_128; j++) {
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				offset_meq_128[j].offset = rule_req->
+				filter_spec_ex_list[i].filter_rule.
+				offset_meq_128[j].offset;
+			memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+					offset_meq_128[j].mask,
+					rule_req->filter_spec_ex_list[i].
+					filter_rule.offset_meq_128[j].mask, 16);
+			memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+					offset_meq_128[j].value, rule_req->
+					filter_spec_ex_list[i].filter_rule.
+					offset_meq_128[j].value, 16);
+		}
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			metadata_meq32_present =
+				rule_req->filter_spec_ex_list[i].
+				filter_rule.metadata_meq32_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			metadata_meq32.offset =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.metadata_meq32.offset;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			metadata_meq32.mask = rule_req->filter_spec_ex_list[i].
+			filter_rule.metadata_meq32.mask;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
+			value = rule_req->filter_spec_ex_list[i].filter_rule.
+			metadata_meq32.value;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ipv4_frag_eq_present = rule_req->filter_spec_ex_list[i].
+			filter_rule.ipv4_frag_eq_present;
+	}
+
+	if (rule_req->xlat_filter_indices_list_valid) {
+		if (rule_req->xlat_filter_indices_list_len >
+		    rmnet_ipa3_ctx->num_q6_rules) {
+			IPAWANERR("Number of xlat indices is not valid: %d\n",
+					rule_req->xlat_filter_indices_list_len);
+			goto failure;
+		}
+		IPAWANDBG("Receive %d XLAT indices: ",
+				rule_req->xlat_filter_indices_list_len);
+		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
+			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
+		IPAWANDBG("\n");
+
+		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
+			if (rule_req->xlat_filter_indices_list[i]
+				>= rmnet_ipa3_ctx->num_q6_rules) {
+				IPAWANERR("Xlat rule idx is wrong: %d\n",
+					rule_req->xlat_filter_indices_list[i]);
+				goto failure;
+			} else {
+				ipa3_qmi_ctx->q6_ul_filter_rule
+				[rule_req->xlat_filter_indices_list[i]]
+				.is_xlat_rule = 1;
+				IPAWANDBG("Rule %d is xlat rule\n",
+					rule_req->xlat_filter_indices_list[i]);
+			}
+		}
+	}
+	goto success;
+
+failure:
+	rmnet_ipa3_ctx->num_q6_rules = 0;
+	memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0,
+		sizeof(ipa3_qmi_ctx->q6_ul_filter_rule));
+	return -EINVAL;
+
+success:
+	return 0;
+}
+
+static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
+{
+	u32 pyld_sz;
+	int i, retval = 0;
+	struct ipa_ioc_add_flt_rule *param;
+	struct ipa_flt_rule_add flt_rule_entry;
+	struct ipa_fltr_installed_notif_req_msg_v01 *req;
+
+	pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
+	   sizeof(struct ipa_flt_rule_add);
+	param = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!param)
+		return -ENOMEM;
+
+	req = (struct ipa_fltr_installed_notif_req_msg_v01 *)
+		kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		kfree(param);
+		return -ENOMEM;
+	}
+
+	param->commit = 1;
+	param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	param->global = false;
+	param->num_rules = (uint8_t)1;
+
+	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+		param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
+		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
+		flt_rule_entry.at_rear = true;
+		flt_rule_entry.rule.action =
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].action;
+		flt_rule_entry.rule.rt_tbl_idx
+		= ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
+		flt_rule_entry.rule.retain_hdr = true;
+		flt_rule_entry.rule.hashable =
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable;
+		flt_rule_entry.rule.rule_id =
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
+
+		/* debug rt-hdl*/
+		IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
+			i, flt_rule_entry.rule.rt_tbl_idx);
+		flt_rule_entry.rule.eq_attrib_type = true;
+		memcpy(&(flt_rule_entry.rule.eq_attrib),
+			&ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
+			sizeof(struct ipa_ipfltri_rule_eq));
+		memcpy(&(param->rules[0]), &flt_rule_entry,
+			sizeof(struct ipa_flt_rule_add));
+		if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+			retval = -EFAULT;
+			IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
+		} else {
+			/* store the rule handler */
+			ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i] =
+				param->rules[0].flt_rule_hdl;
+		}
+	}
+
+	/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
+	req->source_pipe_index =
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+	req->install_status = QMI_RESULT_SUCCESS_V01;
+	req->rule_id_valid = 1;
+	req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
+	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+		req->rule_id[i] =
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
+	}
+	if (ipa3_qmi_filter_notify_send(req)) {
+		IPAWANDBG("add filter rule index on A7-RX failed\n");
+		retval = -EFAULT;
+	}
+	rmnet_ipa3_ctx->old_num_q6_rules = rmnet_ipa3_ctx->num_q6_rules;
+	IPAWANDBG("add (%d) filter rule index on A7-RX\n",
+			rmnet_ipa3_ctx->old_num_q6_rules);
+	kfree(param);
+	kfree(req);
+	return retval;
+}
+
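+/*
+ * Delete the previously installed UL filter rules from IPA using the
+ * cached rule handles and clear the A7 UL filter-rule indication.
+ */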
+static int ipa3_wwan_del_ul_flt_rule_to_ipa(void)
+{
+	u32 pyld_sz;
+	int i, retval = 0;
+	struct ipa_ioc_del_flt_rule *param;
+	struct ipa_flt_rule_del flt_rule_entry;
+
+	pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
+	   sizeof(struct ipa_flt_rule_del);
+	param = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!param) {
+		IPAWANERR("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	param->commit = 1;
+	param->num_hdls = (uint8_t) 1;
+
+	for (i = 0; i < rmnet_ipa3_ctx->old_num_q6_rules; i++) {
+		param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
+		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
+		flt_rule_entry.hdl = ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i];
+		/* debug rt-hdl*/
+		IPAWANDBG("delete-IPA rule index(%d)\n", i);
+		memcpy(&(param->hdl[0]), &flt_rule_entry,
+			sizeof(struct ipa_flt_rule_del));
+		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
+			kfree(param);
+			return -EFAULT;
+		}
+	}
+
+	/* set UL filter-rule add-indication */
+	rmnet_ipa3_ctx->a7_ul_flt_set = false;
+	rmnet_ipa3_ctx->old_num_q6_rules = 0;
+
+	kfree(param);
+	return retval;
+}
+
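+/*
+ * Find the cached mux channel with the given mux_id; returns
+ * MAX_NUM_OF_MUX_CHANNEL when no matching entry exists.
+ */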
+static int ipa3_find_mux_channel_index(uint32_t mux_id)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+		if (mux_id == rmnet_ipa3_ctx->mux_channel[i].mux_id)
+			return i;
+	}
+	return MAX_NUM_OF_MUX_CHANNEL;
+}
+
+static int find_vchannel_name_index(const char *vchannel_name)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+		if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+					vchannel_name) == 0)
+			return i;
+	}
+	return MAX_NUM_OF_MUX_CHANNEL;
+}
+
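+/*
+ * Build the Tx/Rx/Ext interface properties for the mux channel at @index
+ * (QMAP header per mux_id for Tx, metadata-based filtering for Rx, the
+ * cached Q6 UL filter rules as ext properties) and register the virtual
+ * channel with the IPA driver.
+ */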
+static int ipa3_wwan_register_to_ipa(int index)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
+	struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	struct ipa_ext_intf ext_properties = {0};
+	struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
+	u32 pyld_sz;
+	int ret = 0, i;
+
+	IPAWANDBG("index(%d) device[%s]:\n", index,
+		rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	if (!rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set) {
+		ret = ipa3_add_qmap_hdr(
+			rmnet_ipa3_ctx->mux_channel[index].mux_id,
+			&rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
+		if (ret) {
+			IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
+			return ret;
+		}
+		rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set = true;
+	}
+	tx_properties.prop = tx_ioc_properties;
+	tx_ipv4_property = &tx_properties.prop[0];
+	tx_ipv4_property->ip = IPA_IP_v4;
+	tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 rmnet_ipa3_ctx->mux_channel[index].mux_id);
+	tx_ipv6_property = &tx_properties.prop[1];
+	tx_ipv6_property->ip = IPA_IP_v6;
+	tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	/* no need to use A2_MUX_HDR_NAME_V6_PREF, same header */
+	snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 rmnet_ipa3_ctx->mux_channel[index].mux_id);
+	tx_properties.num_props = 2;
+
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_ipv4_property->attrib.meta_data =
+		rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+	rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+	rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_ipv6_property->attrib.meta_data =
+		rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+	rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+	rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	rx_properties.num_props = 2;
+
+	pyld_sz = rmnet_ipa3_ctx->num_q6_rules *
+	   sizeof(struct ipa_ioc_ext_intf_prop);
+	ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
+	if (!ext_ioc_properties) {
+		IPAWANERR("Error allocate memory\n");
+		return -ENOMEM;
+	}
+
+	ext_properties.prop = ext_ioc_properties;
+	ext_properties.excp_pipe_valid = true;
+	ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	ext_properties.num_props = rmnet_ipa3_ctx->num_q6_rules;
+	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+		memcpy(&(ext_properties.prop[i]),
+			&(ipa3_qmi_ctx->q6_ul_filter_rule[i]),
+			sizeof(struct ipa_ioc_ext_intf_prop));
+		ext_properties.prop[i].mux_id =
+			rmnet_ipa3_ctx->mux_channel[index].mux_id;
+		IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
+			ext_properties.prop[i].ip,
+			ext_properties.prop[i].rt_tbl_idx);
+		IPAWANDBG("action: %d mux:%d\n",
+			ext_properties.prop[i].action,
+			ext_properties.prop[i].mux_id);
+	}
+	ret = ipa3_register_intf_ext(rmnet_ipa3_ctx->mux_channel[index].
+		vchannel_name, &tx_properties,
+		&rx_properties, &ext_properties);
+	if (ret) {
+		IPAWANERR("[%s]:ipa3_register_intf failed %d\n",
+			rmnet_ipa3_ctx->mux_channel[index].vchannel_name, ret);
+		goto fail;
+	}
+	rmnet_ipa3_ctx->mux_channel[index].ul_flt_reg = true;
+fail:
+	kfree(ext_ioc_properties);
+	return ret;
+}
+
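+/*
+ * De-register every virtual channel whose UL filter rules were registered
+ * with the IPA driver and clear the ul_flt_reg flag on all cached channels.
+ */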
+static void ipa3_cleanup_deregister_intf(void)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
+		if (rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg) {
+			ret = ipa3_deregister_intf(
+				rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
+			if (ret < 0) {
+				IPAWANERR("de-register device %s(%d) failed\n",
+					rmnet_ipa3_ctx->mux_channel[i].
+					vchannel_name,
+					i);
+				return;
+			}
+			IPAWANDBG("de-register device %s(%d) success\n",
+				rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+				i);
+		}
+		rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = false;
+	}
+}
+
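+/*
+ * Re-install the Q6 UL filter rules (unless the modem filters the embedded
+ * pipe itself) and re-register the Tx/Rx/Ext properties of every cached
+ * mux channel with the IPA driver.
+ */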
+int ipa3_wwan_update_mux_channel_prop(void)
+{
+	int ret = 0, i;
+	/* install UL filter rules */
+	if (rmnet_ipa3_ctx->egress_set) {
+		if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
+			IPAWANDBG("setup UL filter rules\n");
+			if (rmnet_ipa3_ctx->a7_ul_flt_set) {
+				IPAWANDBG("del previous UL filter rules\n");
+				/* delete rule handles */
+				ret = ipa3_wwan_del_ul_flt_rule_to_ipa();
+				if (ret) {
+					IPAWANERR("failed to del old rules\n");
+					return -EINVAL;
+				}
+				IPAWANDBG("deleted old UL rules\n");
+			}
+			ret = ipa3_wwan_add_ul_flt_rule_to_ipa();
+		}
+		if (ret)
+			IPAWANERR("failed to install UL rules\n");
+		else
+			rmnet_ipa3_ctx->a7_ul_flt_set = true;
+	}
+	/* update Tx/Rx/Ext property */
+	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
+	if (rmnet_ipa3_ctx->rmnet_index == 0) {
+		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
+		return ret;
+	}
+
+	ipa3_cleanup_deregister_intf();
+
+	for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
+		ret = ipa3_wwan_register_to_ipa(i);
+		if (ret < 0) {
+			IPAWANERR("failed to re-regist %s, mux %d, index %d\n",
+				rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+				rmnet_ipa3_ctx->mux_channel[i].mux_id,
+				i);
+			return -ENODEV;
+		}
+		IPAWANERR("dev(%s) has registered to IPA\n",
+		rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
+		rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = true;
+	}
+	return ret;
+}
+
+#ifdef INIT_COMPLETION
+#define reinit_completion(x) INIT_COMPLETION(*(x))
+#endif /* INIT_COMPLETION */
+
+static int __ipa_wwan_open(struct net_device *dev)
+{
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
+	IPAWANDBG("[%s] __wwan_open()\n", dev->name);
+	if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
+		reinit_completion(&wwan_ptr->resource_granted_completion);
+	wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+
+	if (ipa3_rmnet_res.ipa_napi_enable)
+		napi_enable(&(wwan_ptr->napi));
+	return 0;
+}
+
+/**
+ * ipa3_wwan_open() - Opens the wwan network interface. Opens the logical
+ * channel on the A2 MUX driver and starts the network stack queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while opening logical channel on A2 MUX driver
+ */
+static int ipa3_wwan_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	IPAWANDBG("[%s] wwan_open()\n", dev->name);
+	rc = __ipa_wwan_open(dev);
+	if (rc == 0)
+		netif_start_queue(dev);
+	return rc;
+}
+
+static int __ipa_wwan_close(struct net_device *dev)
+{
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+	int rc = 0;
+
+	if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
+		wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
+		/* do not close wwan port once up, this causes
+		 * the remote side to hang if it is opened again
+		 */
+		reinit_completion(&wwan_ptr->resource_granted_completion);
+		rc = ipa3_deregister_intf(dev->name);
+		if (rc) {
+			IPAWANERR("[%s]: ipa3_deregister_intf failed %d\n",
+			       dev->name, rc);
+			return rc;
+		}
+		return rc;
+	} else {
+		return -EBADF;
+	}
+}
+
+/**
+ * ipa3_wwan_stop() - Stops the wwan network interface. Closes
+ * logical channel on A2 MUX driver and stops the network stack
+ * queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while opening logical channel on A2 MUX driver
+ */
+static int ipa3_wwan_stop(struct net_device *dev)
+{
+	IPAWANDBG("[%s] ipa3_wwan_stop()\n", dev->name);
+	__ipa_wwan_close(dev);
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int ipa3_wwan_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > WWAN_DATA_LEN)
+		return -EINVAL;
+	IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/**
+ * ipa3_wwan_xmit() - Transmits an skb.
+ *
+ * @skb: skb to be transmitted
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
+ * later
+ * -EFAULT: Error while transmitting the skb
+ */
+static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int ret = 0;
+	bool qmap_check;
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+	struct ipa_tx_meta meta;
+
+	if (skb->protocol != htons(ETH_P_MAP)) {
+		IPAWANDBG_LOW
+		("SW filtering out none QMAP packet received from %s",
+		current->comm);
+		return NETDEV_TX_OK;
+	}
+
+	qmap_check = RMNET_MAP_GET_CD_BIT(skb);
+	if (netif_queue_stopped(dev)) {
+		if (qmap_check &&
+			atomic_read(&wwan_ptr->outstanding_pkts) <
+					wwan_ptr->outstanding_high_ctl) {
+			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
+			goto send;
+		} else {
+			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	/* checking High WM hit */
+	if (atomic_read(&wwan_ptr->outstanding_pkts) >=
+					wwan_ptr->outstanding_high) {
+		if (!qmap_check) {
+			IPAWANDBG_LOW("pending(%d)/(%d)- stop(%d)\n",
+				atomic_read(&wwan_ptr->outstanding_pkts),
+				wwan_ptr->outstanding_high,
+				netif_queue_stopped(dev));
+			IPAWANDBG_LOW("qmap_chk(%d)\n", qmap_check);
+			netif_stop_queue(dev);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+send:
+	/* IPA_RM checking start */
+	ret = ipa_rm_inactivity_timer_request_resource(
+		IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret == -EINPROGRESS) {
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+	if (ret) {
+		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
+		       dev->name, ret);
+		return -EFAULT;
+	}
+	/* IPA_RM checking end */
+
+	if (RMNET_MAP_GET_CD_BIT(skb)) {
+		memset(&meta, 0, sizeof(meta));
+		meta.pkt_init_dst_ep_valid = true;
+		meta.pkt_init_dst_ep_remote = true;
+		ret = ipa3_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
+	} else {
+		ret = ipa3_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
+	}
+
+	if (ret) {
+		ret = NETDEV_TX_BUSY;
+		dev->stats.tx_dropped++;
+		goto out;
+	}
+
+	atomic_inc(&wwan_ptr->outstanding_pkts);
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+	ret = NETDEV_TX_OK;
+out:
+	ipa_rm_inactivity_timer_release_resource(
+		IPA_RM_RESOURCE_WWAN_0_PROD);
+	return ret;
+}
+
+static void ipa3_wwan_tx_timeout(struct net_device *dev)
+{
+	IPAWANERR("[%s] ipa3_wwan_tx_timeout(), data stall in UL\n", dev->name);
+}
+
+/**
+ * apps_ipa_tx_complete_notify() - Tx complete notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Check that the packet is the one we sent and release it.
+ * This function will be called in deferred context in the IPA wq.
+ */
+static void apps_ipa_tx_complete_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct net_device *dev = (struct net_device *)priv;
+	struct ipa3_wwan_private *wwan_ptr;
+
+	if (dev != IPA_NETDEV()) {
+		IPAWANDBG("Received pre-SSR packet completion\n");
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	if (evt != IPA_WRITE_DONE) {
+		IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
+		return;
+	}
+
+	wwan_ptr = netdev_priv(dev);
+	atomic_dec(&wwan_ptr->outstanding_pkts);
+	__netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) &&
+		netif_queue_stopped(wwan_ptr->net) &&
+		atomic_read(&wwan_ptr->outstanding_pkts) <
+					(wwan_ptr->outstanding_low)) {
+		IPAWANDBG_LOW("Outstanding low (%d) - waking up queue\n",
+				wwan_ptr->outstanding_low);
+		netif_wake_queue(wwan_ptr->net);
+	}
+	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
+	dev_kfree_skb_any(skb);
+	ipa_rm_inactivity_timer_release_resource(
+		IPA_RM_RESOURCE_WWAN_0_PROD);
+}
+
+/**
+ * apps_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA passes a received packet (skb) to the Linux network stack.
+ */
+static void apps_ipa_packet_receive_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)priv;
+
+	if (evt == IPA_RECEIVE) {
+		struct sk_buff *skb = (struct sk_buff *)data;
+		int result;
+		unsigned int packet_len = skb->len;
+
+		IPAWANDBG_LOW("Rx packet was received");
+		skb->dev = IPA_NETDEV();
+		skb->protocol = htons(ETH_P_MAP);
+
+		if (ipa3_rmnet_res.ipa_napi_enable) {
+			trace_rmnet_ipa_netif_rcv_skb3(dev->stats.rx_packets);
+			result = netif_receive_skb(skb);
+		} else {
+			if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
+					== 0) {
+				trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
+				result = netif_rx_ni(skb);
+			} else {
+				trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
+				result = netif_rx(skb);
+			}
+		}
+
+		if (result)	{
+			pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
+							   __func__, __LINE__);
+			dev->stats.rx_dropped++;
+		}
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += packet_len;
+	} else if (evt == IPA_CLIENT_START_POLL)
+		ipa3_rmnet_rx_cb(priv);
+	else if (evt == IPA_CLIENT_COMP_NAPI) {
+		if (ipa3_rmnet_res.ipa_napi_enable)
+			napi_complete(&(rmnet_ipa3_ctx->wwan_priv->napi));
+	} else
+		IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
+}
+
+/**
+ * ipa3_wwan_ioctl() - I/O control for wwan network driver.
+ *
+ * @dev: network device
+ * @ifr: ignored
+ * @cmd: cmd to be executed. Can be one of the following:
+ * IPA_WWAN_IOCTL_OPEN - Open the network interface
+ * IPA_WWAN_IOCTL_CLOSE - Close the network interface
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: unsupported command
+ * -EFAULT: failed to copy data to/from user space
+ */
+static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int rc = 0;
+	int mru = 1000, epid = 1, mux_index, len;
+	struct ipa_msg_meta msg_meta;
+	struct ipa_wan_msg *wan_msg = NULL;
+	struct rmnet_ioctl_extended_s extend_ioctl_data;
+	struct rmnet_ioctl_data_s ioctl_data;
+	struct ipa3_rmnet_mux_val *mux_channel;
+	int rmnet_index;
+
+	IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
+	switch (cmd) {
+	/*  Set Ethernet protocol  */
+	case RMNET_IOCTL_SET_LLP_ETHERNET:
+		break;
+	/*  Set RAWIP protocol  */
+	case RMNET_IOCTL_SET_LLP_IP:
+		break;
+	/*  Get link protocol  */
+	case RMNET_IOCTL_GET_LLP:
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+			sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	/*  Set QoS header enabled  */
+	case RMNET_IOCTL_SET_QOS_ENABLE:
+		return -EINVAL;
+	/*  Set QoS header disabled  */
+	case RMNET_IOCTL_SET_QOS_DISABLE:
+		break;
+	/*  Get QoS header state  */
+	case RMNET_IOCTL_GET_QOS:
+		ioctl_data.u.operation_mode = RMNET_MODE_NONE;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+			sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	/*  Get operation mode */
+	case RMNET_IOCTL_GET_OPMODE:
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+			sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	/*  Open transport port  */
+	case RMNET_IOCTL_OPEN:
+		break;
+	/*  Close transport port  */
+	case RMNET_IOCTL_CLOSE:
+		break;
+	/*  Flow enable  */
+	case RMNET_IOCTL_FLOW_ENABLE:
+		IPAWANDBG("Received flow enable\n");
+		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+			sizeof(struct rmnet_ioctl_data_s))) {
+			rc = -EFAULT;
+			break;
+		}
+		ipa3_flow_control(IPA_CLIENT_USB_PROD, true,
+			ioctl_data.u.tcm_handle);
+		break;
+	/*  Flow disable  */
+	case RMNET_IOCTL_FLOW_DISABLE:
+		IPAWANDBG("Received flow disable\n");
+		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+			sizeof(struct rmnet_ioctl_data_s))) {
+			rc = -EFAULT;
+			break;
+		}
+		ipa3_flow_control(IPA_CLIENT_USB_PROD, false,
+			ioctl_data.u.tcm_handle);
+		break;
+	/*  Set flow handle  */
+	case RMNET_IOCTL_FLOW_SET_HNDL:
+		break;
+
+	/*  Extended IOCTLs  */
+	case RMNET_IOCTL_EXTENDED:
+		IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
+		if (copy_from_user(&extend_ioctl_data,
+			(u8 *)ifr->ifr_ifru.ifru_data,
+			sizeof(struct rmnet_ioctl_extended_s))) {
+			IPAWANERR("failed to copy extended ioctl data\n");
+			rc = -EFAULT;
+			break;
+		}
+		switch (extend_ioctl_data.extended_ioctl) {
+		/*  Get features  */
+		case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+			IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
+			extend_ioctl_data.u.data =
+				(RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
+				RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
+				RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/*  Set MRU  */
+		case RMNET_IOCTL_SET_MRU:
+			mru = extend_ioctl_data.u.data;
+			IPAWANDBG("get MRU size %d\n",
+				extend_ioctl_data.u.data);
+			break;
+		/*  Get MRU  */
+		case RMNET_IOCTL_GET_MRU:
+			extend_ioctl_data.u.data = mru;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/* GET SG support */
+		case RMNET_IOCTL_GET_SG_SUPPORT:
+			extend_ioctl_data.u.data =
+				ipa3_rmnet_res.ipa_advertise_sg_support;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/*  Get endpoint ID  */
+		case RMNET_IOCTL_GET_EPID:
+			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
+			extend_ioctl_data.u.data = epid;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			if (copy_from_user(&extend_ioctl_data,
+				(u8 *)ifr->ifr_ifru.ifru_data,
+				sizeof(struct rmnet_ioctl_extended_s))) {
+				IPAWANERR("copy extended ioctl data failed\n");
+				rc = -EFAULT;
+				break;
+			}
+			IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
+					extend_ioctl_data.u.data);
+			break;
+		/*  Endpoint pair  */
+		case RMNET_IOCTL_GET_EP_PAIR:
+			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
+			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
+			ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
+			ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			if (copy_from_user(&extend_ioctl_data,
+				(u8 *)ifr->ifr_ifru.ifru_data,
+				sizeof(struct rmnet_ioctl_extended_s))) {
+				IPAWANERR("copy extended ioctl data failed\n");
+				rc = -EFAULT;
+				break;
+			}
+			IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
+			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
+			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
+			break;
+		/*  Get driver name  */
+		case RMNET_IOCTL_GET_DRIVER_NAME:
+			memcpy(&extend_ioctl_data.u.if_name,
+				IPA_NETDEV()->name,
+							sizeof(IFNAMSIZ));
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+					&extend_ioctl_data,
+					sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/*  Add MUX ID  */
+		case RMNET_IOCTL_ADD_MUX_CHANNEL:
+			mux_index = ipa3_find_mux_channel_index(
+				extend_ioctl_data.u.rmnet_mux_val.mux_id);
+			if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
+				IPAWANDBG("already setup mux(%d)\n",
+					extend_ioctl_data.u.
+					rmnet_mux_val.mux_id);
+				return rc;
+			}
+			if (rmnet_ipa3_ctx->rmnet_index
+				>= MAX_NUM_OF_MUX_CHANNEL) {
+				IPAWANERR("Exceed mux_channel limit(%d)\n",
+				rmnet_ipa3_ctx->rmnet_index);
+				return -EFAULT;
+			}
+			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
+			extend_ioctl_data.u.rmnet_mux_val.mux_id,
+			extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
+			/* cache the mux name and id */
+			mux_channel = rmnet_ipa3_ctx->mux_channel;
+			rmnet_index = rmnet_ipa3_ctx->rmnet_index;
+
+			mux_channel[rmnet_index].mux_id =
+				extend_ioctl_data.u.rmnet_mux_val.mux_id;
+			memcpy(mux_channel[rmnet_index].vchannel_name,
+				extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
+				sizeof(mux_channel[rmnet_index]
+					.vchannel_name));
+			IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
+				mux_channel[rmnet_index].vchannel_name,
+				mux_channel[rmnet_index].mux_id,
+				rmnet_index);
+			/* check if UL filter rules have arrived */
+			if (rmnet_ipa3_ctx->num_q6_rules != 0) {
+				IPAWANERR("dev(%s) register to IPA\n",
+					extend_ioctl_data.u.rmnet_mux_val.
+					vchannel_name);
+				rc = ipa3_wwan_register_to_ipa(
+						rmnet_ipa3_ctx->rmnet_index);
+				if (rc < 0) {
+					IPAWANERR("device %s reg IPA failed\n",
+						extend_ioctl_data.u.
+						rmnet_mux_val.vchannel_name);
+					return -ENODEV;
+				}
+				mux_channel[rmnet_index].mux_channel_set = true;
+				mux_channel[rmnet_index].ul_flt_reg = true;
+			} else {
+				IPAWANDBG("dev(%s) haven't registered to IPA\n",
+					extend_ioctl_data.u.
+					rmnet_mux_val.vchannel_name);
+				mux_channel[rmnet_index].mux_channel_set = true;
+				mux_channel[rmnet_index].ul_flt_reg = false;
+			}
+			rmnet_ipa3_ctx->rmnet_index++;
+			break;
+		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
+			IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
+			if ((extend_ioctl_data.u.data) &
+					RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.hdr.hdr_len = 8;
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.cfg.cs_offload_en =
+					IPA_ENABLE_CS_OFFLOAD_UL;
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.cfg.cs_metadata_hdr_offset
+						= 1;
+			} else {
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.hdr.hdr_len = 4;
+			}
+			if ((extend_ioctl_data.u.data) &
+					RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.aggr.aggr_en =
+						IPA_ENABLE_AGGR;
+			else
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.aggr.aggr_en =
+						IPA_BYPASS_AGGR;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+				hdr_ofst_metadata_valid = 1;
+			/* modem wants offset at 0! */
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+				hdr_ofst_metadata = 0;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
+				dst = IPA_CLIENT_APPS_LAN_WAN_PROD;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
+				mode = IPA_BASIC;
+
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.client =
+				IPA_CLIENT_APPS_LAN_WAN_PROD;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.notify =
+				apps_ipa_tx_complete_notify;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.desc_fifo_sz =
+			IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.priv = dev;
+
+			rc = ipa3_setup_sys_pipe(
+				&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg,
+				&rmnet_ipa3_ctx->apps_to_ipa3_hdl);
+			if (rc)
+				IPAWANERR("failed to config egress endpoint\n");
+
+			if (rmnet_ipa3_ctx->num_q6_rules != 0) {
+				/* already got Q6 UL filter rules*/
+				if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt
+					== false)
+					rc = ipa3_wwan_add_ul_flt_rule_to_ipa();
+				else
+					rc = 0;
+				rmnet_ipa3_ctx->egress_set = true;
+				if (rc)
+					IPAWANERR("install UL rules failed\n");
+				else
+					rmnet_ipa3_ctx->a7_ul_flt_set = true;
+			} else {
+				/* wait for Q6 UL filter rules */
+				rmnet_ipa3_ctx->egress_set = true;
+				IPAWANDBG("no UL-rules, egress_set(%d)\n",
+					rmnet_ipa3_ctx->egress_set);
+			}
+			break;
+		case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/*  Set IDF  */
+			IPAWANDBG("get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
+			if ((extend_ioctl_data.u.data) &
+					RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+				rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+					ipa_ep_cfg.cfg.cs_offload_en =
+					IPA_ENABLE_CS_OFFLOAD_DL;
+
+			if ((extend_ioctl_data.u.data) &
+					RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
+				IPAWANERR("get AGG size %d count %d\n",
+					extend_ioctl_data.u.
+					ingress_format.agg_size,
+					extend_ioctl_data.u.
+					ingress_format.agg_count);
+				if (!ipa_disable_apps_wan_cons_deaggr(
+					extend_ioctl_data.u.
+					ingress_format.agg_size,
+					extend_ioctl_data.
+					u.ingress_format.agg_count)) {
+					rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+					ipa_ep_cfg.aggr.aggr_byte_limit =
+					extend_ioctl_data.u.ingress_format.
+					agg_size;
+					rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+					ipa_ep_cfg.aggr.aggr_pkt_limit =
+					extend_ioctl_data.u.ingress_format.
+					agg_count;
+				}
+			}
+
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+				hdr_len = 4;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+				hdr_ofst_metadata_valid = 1;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
+				hdr.hdr_ofst_metadata = 1;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+				hdr_ofst_pkt_size_valid = 1;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+				hdr_ofst_pkt_size = 2;
+
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+				hdr_total_len_or_pad_valid = true;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+				hdr_total_len_or_pad = 0;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+				hdr_payload_len_inc_padding = true;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+				hdr_total_len_or_pad_offset = 0;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+				hdr_little_endian = 0;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
+				metadata_mask.metadata_mask = 0xFF000000;
+
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.client =
+				IPA_CLIENT_APPS_WAN_CONS;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.notify =
+				apps_ipa_packet_receive_notify;
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.priv = dev;
+
+			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled =
+				ipa3_rmnet_res.ipa_napi_enable;
+			if (rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled)
+				rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+				desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+			else
+				rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+				desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
+			mutex_lock(
+				&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+			if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+				IPAWANDBG("In SSR sequence/recovery\n");
+				mutex_unlock(&rmnet_ipa3_ctx->
+					ipa_to_apps_pipe_handle_guard);
+				rc = -EFAULT;
+				break;
+			}
+			rc = ipa3_setup_sys_pipe(
+				&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
+				&rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+			mutex_unlock(&rmnet_ipa3_ctx->
+				ipa_to_apps_pipe_handle_guard);
+			if (rc)
+				IPAWANERR("failed to configure ingress\n");
+			break;
+		case RMNET_IOCTL_SET_XLAT_DEV_INFO:
+			wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
+						GFP_KERNEL);
+			if (!wan_msg) {
+				IPAWANERR("Failed to allocate memory.\n");
+				return -ENOMEM;
+			}
+			len = sizeof(wan_msg->upstream_ifname) >
+			sizeof(extend_ioctl_data.u.if_name) ?
+				sizeof(extend_ioctl_data.u.if_name) :
+				sizeof(wan_msg->upstream_ifname);
+			strlcpy(wan_msg->upstream_ifname,
+				extend_ioctl_data.u.if_name, len);
+			memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+			msg_meta.msg_type = WAN_XLAT_CONNECT;
+			msg_meta.msg_len = sizeof(struct ipa_wan_msg);
+			rc = ipa3_send_msg(&msg_meta, wan_msg,
+						ipa3_wwan_msg_free_cb);
+			if (rc) {
+				IPAWANERR("Failed to send XLAT_CONNECT msg\n");
+				kfree(wan_msg);
+			}
+			break;
+		/*  Get agg count  */
+		case RMNET_IOCTL_GET_AGGREGATION_COUNT:
+			break;
+		/*  Set agg count  */
+		case RMNET_IOCTL_SET_AGGREGATION_COUNT:
+			break;
+		/*  Get agg size  */
+		case RMNET_IOCTL_GET_AGGREGATION_SIZE:
+			break;
+		/*  Set agg size  */
+		case RMNET_IOCTL_SET_AGGREGATION_SIZE:
+			break;
+		/*  Do flow control  */
+		case RMNET_IOCTL_FLOW_CONTROL:
+			break;
+		/*  For legacy use  */
+		case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
+			break;
+		/*  Get HW/SW map  */
+		case RMNET_IOCTL_GET_HWSW_MAP:
+			break;
+		/*  Set RX Headroom  */
+		case RMNET_IOCTL_SET_RX_HEADROOM:
+			break;
+		default:
+			IPAWANERR("[%s] unsupported extended cmd[%d]",
+				dev->name,
+				extend_ioctl_data.extended_ioctl);
+			rc = -EINVAL;
+		}
+		break;
+	default:
+			IPAWANERR("[%s] unsupported cmd[%d]",
+				dev->name, cmd);
+			rc = -EINVAL;
+	}
+	return rc;
+}
+
+static const struct net_device_ops ipa3_wwan_ops_ip = {
+	.ndo_open = ipa3_wwan_open,
+	.ndo_stop = ipa3_wwan_stop,
+	.ndo_start_xmit = ipa3_wwan_xmit,
+	.ndo_tx_timeout = ipa3_wwan_tx_timeout,
+	.ndo_do_ioctl = ipa3_wwan_ioctl,
+	.ndo_change_mtu = ipa3_wwan_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+/**
+ * ipa3_wwan_setup() - Sets up the wwan network driver.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+
+static void ipa3_wwan_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &ipa3_wwan_ops_ip;
+	ether_setup(dev);
+	/* set this after calling ether_setup */
+	dev->header_ops = 0;  /* No header */
+	dev->type = ARPHRD_RAWIP;
+	dev->hard_header_len = 0;
+	dev->mtu = WWAN_DATA_LEN;
+	dev->addr_len = 0;
+	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+	dev->needed_headroom = HEADROOM_FOR_QMAP;
+	dev->needed_tailroom = TAILROOM;
+	dev->watchdog_timeo = 1000;
+}
+
+/* IPA_RM related functions start*/
+static void ipa3_q6_prod_rm_request_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_request,
+		ipa3_q6_prod_rm_request_resource);
+static void ipa3_q6_prod_rm_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_release,
+		ipa3_q6_prod_rm_release_resource);
+
+static void ipa3_q6_prod_rm_request_resource(struct work_struct *work)
+{
+	int ret = 0;
+
+	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (ret < 0 && ret != -EINPROGRESS) {
+		IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
+		       ret);
+		return;
+	}
+}
+
+static int ipa3_q6_rm_request_resource(void)
+{
+	queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
+	   &ipa3_q6_con_rm_request, 0);
+	return 0;
+}
+
+static void ipa3_q6_prod_rm_release_resource(struct work_struct *work)
+{
+	int ret = 0;
+
+	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (ret < 0 && ret != -EINPROGRESS) {
+		IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
+		      ret);
+		return;
+	}
+}
+
+
+static int ipa3_q6_rm_release_resource(void)
+{
+	queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
+	   &ipa3_q6_con_rm_release, 0);
+	return 0;
+}
+
+
+static void ipa3_q6_rm_notify_cb(void *user_data,
+		enum ipa_rm_event event,
+		unsigned long data)
+{
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		IPAWANDBG_LOW("%s: Q6_PROD GRANTED CB\n", __func__);
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		IPAWANDBG_LOW("%s: Q6_PROD RELEASED CB\n", __func__);
+		break;
+	default:
+		return;
+	}
+}
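+
+/*
+ * Create the Q6 producer/consumer IPA_RM resources, add the
+ * Q6_PROD -> APPS_CONS dependency and set a performance profile
+ * on both resources.
+ */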
+static int ipa3_q6_initialize_rm(void)
+{
+	struct ipa_rm_create_params create_params;
+	struct ipa_rm_perf_profile profile;
+	int result;
+
+	/* Initialize IPA_RM workqueue */
+	rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req");
+	if (!rmnet_ipa3_ctx->rm_q6_wq)
+		return -ENOMEM;
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_Q6_PROD;
+	create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb;
+	result = ipa_rm_create_resource(&create_params);
+	if (result)
+		goto create_rsrc_err1;
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_Q6_CONS;
+	create_params.release_resource = &ipa3_q6_rm_release_resource;
+	create_params.request_resource = &ipa3_q6_rm_request_resource;
+	result = ipa_rm_create_resource(&create_params);
+	if (result)
+		goto create_rsrc_err2;
+	/* add dependency*/
+	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+	if (result)
+		goto add_dpnd_err;
+	/* setup Performance profile */
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 100;
+	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+			&profile);
+	if (result)
+		goto set_perf_err;
+	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
+			&profile);
+	if (result)
+		goto set_perf_err;
+	return result;
+
+set_perf_err:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+add_dpnd_err:
+	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+	if (result < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_CONS, result);
+create_rsrc_err2:
+	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (result < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_PROD, result);
+create_rsrc_err1:
+	destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+	return result;
+}
+
+void ipa3_q6_deinitialize_rm(void)
+{
+	int ret;
+
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
+			ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_CONS, ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_PROD, ret);
+	destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+}
+
+static void ipa3_wake_tx_queue(struct work_struct *work)
+{
+	if (IPA_NETDEV()) {
+		__netif_tx_lock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
+		netif_wake_queue(IPA_NETDEV());
+		__netif_tx_unlock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
+	}
+}
+
+/**
+ * ipa3_rm_resource_granted() - Called upon
+ * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_rm_resource_granted(void *dev)
+{
+	IPAWANDBG_LOW("Resource Granted - starting queue\n");
+	schedule_work(&ipa3_tx_wakequeue_work);
+}
+
+/**
+ * ipa3_rm_notify() - Callback function for RM events. Handles
+ * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
+ * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
+ * workqueue.
+ *
+ * @dev: network device
+ * @event: IPA RM event
+ * @data: Additional data provided by IPA RM
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_rm_notify(void *dev, enum ipa_rm_event event,
+			  unsigned long data)
+{
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
+	pr_debug("%s: event %d\n", __func__, event);
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
+			complete_all(&wwan_ptr->resource_granted_completion);
+			break;
+		}
+		ipa3_rm_resource_granted(dev);
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		break;
+	default:
+		pr_err("%s: unknown event %d\n", __func__, event);
+		break;
+	}
+}
+
+/* IPA_RM related functions end*/
+
+static int ipa3_ssr_notifier_cb(struct notifier_block *this,
+			   unsigned long code,
+			   void *data);
+
+static struct notifier_block ipa3_ssr_notifier = {
+	.notifier_call = ipa3_ssr_notifier_cb,
+};
+
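+/*
+ * Read the rmnet-IPA device tree properties: modem SSR support
+ * (qcom,rmnet-ipa-ssr), uC loading (qcom,ipa-loaduC) and scatter-gather
+ * advertisement (qcom,ipa-advertise-sg-support).
+ */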
+static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
+		struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
+{
+	ipa_rmnet_drv_res->ipa_rmnet_ssr =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,rmnet-ipa-ssr");
+	pr_info("IPA SSR support = %s\n",
+		ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
+	ipa_rmnet_drv_res->ipa_loaduC =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-loaduC");
+	pr_info("IPA ipa-loaduC = %s\n",
+		ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
+
+	ipa_rmnet_drv_res->ipa_advertise_sg_support =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,ipa-advertise-sg-support");
+	pr_info("IPA SG support = %s\n",
+		ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
+	return 0;
+}
+
+struct ipa3_rmnet_context ipa3_rmnet_ctx;
+static int ipa3_wwan_probe(struct platform_device *pdev);
+struct platform_device *m_pdev;
+
+static void ipa3_delayed_probe(struct work_struct *work)
+{
+	(void)ipa3_wwan_probe(m_pdev);
+}
+
+static DECLARE_WORK(ipa3_scheduled_probe, ipa3_delayed_probe);
+
+static void ipa3_ready_cb(void *user_data)
+{
+	struct platform_device *pdev = (struct platform_device *)(user_data);
+
+	m_pdev = pdev;
+
+	IPAWANDBG("IPA ready callback has been triggered!\n");
+
+	schedule_work(&ipa3_scheduled_probe);
+}
+
+/**
+ * ipa3_wwan_probe() - Initializes the module and registers it as a
+ * network interface with the network stack
+ *
+ * Note: In case IPA driver hasn't initialized already, the probe function
+ * will return immediately after registering a callback to be invoked when
+ * IPA driver initialization is complete.
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: No memory available
+ * -EFAULT: Internal error
+ */
+static int ipa3_wwan_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct net_device *dev;
+	struct ipa_rm_create_params ipa_rm_params;	/* IPA_RM */
+	struct ipa_rm_perf_profile profile;			/* IPA_RM */
+
+	pr_info("rmnet_ipa3 started initialization\n");
+
+	if (!ipa3_is_ready()) {
+		IPAWANDBG("IPA driver not ready, registering callback\n");
+		ret = ipa_register_ipa_ready_cb(ipa3_ready_cb, (void *)pdev);
+
+		/*
+		 * If we received -EEXIST, IPA has initialized. So we need
+		 * to continue the probing process.
+		 */
+		if (ret != -EEXIST) {
+			if (ret)
+				IPAWANERR("IPA CB reg failed - %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = get_ipa_rmnet_dts_configuration(pdev, &ipa3_rmnet_res);
+	ipa3_rmnet_ctx.ipa_rmnet_ssr = ipa3_rmnet_res.ipa_rmnet_ssr;
+
+	ret = ipa3_init_q6_smem();
+	if (ret) {
+		IPAWANERR("ipa3_init_q6_smem failed!\n");
+		return ret;
+	}
+
+	/* initialize tx/rx endpoint setup */
+	memset(&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg, 0,
+		sizeof(struct ipa_sys_connect_params));
+	memset(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, 0,
+		sizeof(struct ipa_sys_connect_params));
+
+	/* initialize ex property setup */
+	rmnet_ipa3_ctx->num_q6_rules = 0;
+	rmnet_ipa3_ctx->old_num_q6_rules = 0;
+	rmnet_ipa3_ctx->rmnet_index = 0;
+	rmnet_ipa3_ctx->egress_set = false;
+	rmnet_ipa3_ctx->a7_ul_flt_set = false;
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
+		memset(&rmnet_ipa3_ctx->mux_channel[i], 0,
+				sizeof(struct ipa3_rmnet_mux_val));
+
+	/* start A7 QMI service/client */
+	if (ipa3_rmnet_res.ipa_loaduC)
+		/* Android platform loads uC */
+		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
+	else
+		/* LE platform does not load uC */
+		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
+
+	/* construct default WAN RT tbl for IPACM */
+	ret = ipa3_setup_a7_qmap_hdr();
+	if (ret)
+		goto setup_a7_qmap_hdr_err;
+	ret = ipa3_setup_dflt_wan_rt_tables();
+	if (ret)
+		goto setup_dflt_wan_rt_tables_err;
+
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+		/* Start transport-driver fd ioctl for ipacm for first init */
+		ret = ipa3_wan_ioctl_init();
+		if (ret)
+			goto wan_ioctl_init_err;
+	} else {
+		/* Enable sending QMI messages after SSR */
+		ipa3_wan_ioctl_enable_qmi_messages();
+	}
+
+	/* initialize wan-driver netdev */
+	dev = alloc_netdev(sizeof(struct ipa3_wwan_private),
+			   IPA_WWAN_DEV_NAME,
+			   NET_NAME_UNKNOWN,
+			   ipa3_wwan_setup);
+	if (!dev) {
+		IPAWANERR("no memory for netdev\n");
+		ret = -ENOMEM;
+		goto alloc_netdev_err;
+	}
+	rmnet_ipa3_ctx->wwan_priv = netdev_priv(dev);
+	memset(rmnet_ipa3_ctx->wwan_priv, 0,
+		sizeof(*(rmnet_ipa3_ctx->wwan_priv)));
+	IPAWANDBG("wwan_ptr (private) = %p", rmnet_ipa3_ctx->wwan_priv);
+	rmnet_ipa3_ctx->wwan_priv->net = dev;
+	rmnet_ipa3_ctx->wwan_priv->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+	rmnet_ipa3_ctx->wwan_priv->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+	atomic_set(&rmnet_ipa3_ctx->wwan_priv->outstanding_pkts, 0);
+	spin_lock_init(&rmnet_ipa3_ctx->wwan_priv->lock);
+	init_completion(
+		&rmnet_ipa3_ctx->wwan_priv->resource_granted_completion);
+
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+		/* IPA_RM configuration starts */
+		ret = ipa3_q6_initialize_rm();
+		if (ret) {
+			IPAWANERR("%s: ipa3_q6_initialize_rm failed, ret: %d\n",
+				__func__, ret);
+			goto q6_init_err;
+		}
+	}
+
+	memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
+	ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
+	ipa_rm_params.reg_params.user_data = dev;
+	ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
+	ret = ipa_rm_create_resource(&ipa_rm_params);
+	if (ret) {
+		pr_err("%s: unable to create resourse %d in IPA RM\n",
+		       __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
+		goto create_rsrc_err;
+	}
+	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
+					   IPA_RM_INACTIVITY_TIMER);
+	if (ret) {
+		pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
+		       __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
+		goto timer_init_err;
+	}
+	/* add dependency */
+	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
+	if (ret)
+		goto add_dpnd_err;
+	/* setup Performance profile */
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
+			&profile);
+	if (ret)
+		goto set_perf_err;
+	/* IPA_RM configuration ends */
+
+	/* Enable SG support in netdevice. */
+	if (ipa3_rmnet_res.ipa_advertise_sg_support)
+		dev->hw_features |= NETIF_F_SG;
+
+	if (ipa3_rmnet_res.ipa_napi_enable)
+		netif_napi_add(dev, &(rmnet_ipa3_ctx->wwan_priv->napi),
+		       ipa3_rmnet_poll, NAPI_WEIGHT);
+	ret = register_netdev(dev);
+	if (ret) {
+		IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
+			0, ret);
+		goto set_perf_err;
+	}
+
+	IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n", dev->name);
+	if (ret) {
+		IPAWANERR("default configuration failed rc=%d\n",
+				ret);
+		goto config_err;
+	}
+	atomic_set(&rmnet_ipa3_ctx->is_initialized, 1);
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+		/* offline charging mode */
+		ipa3_proxy_clk_unvote();
+	}
+	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+
+	pr_info("rmnet_ipa completed initialization\n");
+	return 0;
+config_err:
+	if (ipa3_rmnet_res.ipa_napi_enable)
+		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
+	unregister_netdev(dev);
+set_perf_err:
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (ret)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+			ret);
+add_dpnd_err:
+	ret = ipa_rm_inactivity_timer_destroy(
+		IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
+	if (ret)
+		IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+timer_init_err:
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+create_rsrc_err:
+	ipa3_q6_deinitialize_rm();
+q6_init_err:
+	free_netdev(dev);
+	rmnet_ipa3_ctx->wwan_priv = NULL;
+alloc_netdev_err:
+	ipa3_wan_ioctl_deinit();
+wan_ioctl_init_err:
+	ipa3_del_dflt_wan_rt_tables();
+setup_dflt_wan_rt_tables_err:
+	ipa3_del_a7_qmap_hdr();
+setup_a7_qmap_hdr_err:
+	ipa3_qmi_service_exit();
+	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+	return ret;
+}
+
+static int ipa3_wwan_remove(struct platform_device *pdev)
+{
+	int ret;
+
+	pr_info("rmnet_ipa started deinitialization\n");
+	mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+	ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+	if (ret < 0)
+		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
+	else
+		rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
+	if (ipa3_rmnet_res.ipa_napi_enable)
+		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
+	mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+	unregister_netdev(IPA_NETDEV());
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+			ret);
+	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret < 0)
+		IPAWANERR(
+		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+	cancel_work_sync(&ipa3_tx_wakequeue_work);
+	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+	if (IPA_NETDEV())
+		free_netdev(IPA_NETDEV());
+	rmnet_ipa3_ctx->wwan_priv = NULL;
+	/* No need to remove wwan_ioctl during SSR */
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
+		ipa3_wan_ioctl_deinit();
+	ipa3_del_dflt_wan_rt_tables();
+	ipa3_del_a7_qmap_hdr();
+	ipa3_del_mux_qmap_hdrs();
+	if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false)
+		ipa3_wwan_del_ul_flt_rule_to_ipa();
+	ipa3_cleanup_deregister_intf();
+	atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
+	pr_info("rmnet_ipa completed deinitialization\n");
+	return 0;
+}
+
+/**
+ * rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP suspend
+ * operation is invoked, usually by pressing a suspend button.
+ *
+ * Returns -EAGAIN to the runtime_pm framework in case there are pending packets
+ * in the Tx queue. This will postpone the suspend operation until all the
+ * pending packets have been transmitted.
+ *
+ * In case there are no packets to send, releases the WWAN0_PROD entity.
+ * As an outcome, the number of IPA active clients should be decremented
+ * until IPA clocks can be gated.
+ */
+static int rmnet_ipa_ap_suspend(struct device *dev)
+{
+	struct net_device *netdev = IPA_NETDEV();
+	struct ipa3_wwan_private *wwan_ptr;
+
+	IPAWANDBG_LOW("Enter...\n");
+	if (netdev == NULL) {
+		IPAWANERR("netdev is NULL.\n");
+		return 0;
+	}
+
+	wwan_ptr = netdev_priv(netdev);
+	if (wwan_ptr == NULL) {
+		IPAWANERR("wwan_ptr is NULL.\n");
+		return 0;
+	}
+
+	/* Do not allow A7 to suspend in case there are outstanding packets */
+	if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
+		IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
+		return -EAGAIN;
+	}
+
+	/* Make sure that there is no Tx operation ongoing */
+	netif_tx_lock_bh(netdev);
+	ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	netif_tx_unlock_bh(netdev);
+	IPAWANDBG_LOW("Exit\n");
+
+	return 0;
+}
+
+/**
+ * rmnet_ipa_ap_resume() - resume callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP resume
+ * operation is invoked.
+ *
+ * Enables the network interface queue and returns success to the
+ * runtime_pm framework.
+ */
+static int rmnet_ipa_ap_resume(struct device *dev)
+{
+	struct net_device *netdev = IPA_NETDEV();
+
+	IPAWANDBG_LOW("Enter...\n");
+	if (netdev)
+		netif_wake_queue(netdev);
+	IPAWANDBG_LOW("Exit\n");
+
+	return 0;
+}
+
+static void ipa_stop_polling_stats(void)
+{
+	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+	ipa3_rmnet_ctx.polling_interval = 0;
+}
+
+static const struct of_device_id rmnet_ipa_dt_match[] = {
+	{.compatible = "qcom,rmnet-ipa3"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);
+
+static const struct dev_pm_ops rmnet_ipa_pm_ops = {
+	.suspend_noirq = rmnet_ipa_ap_suspend,
+	.resume_noirq = rmnet_ipa_ap_resume,
+};
+
+static struct platform_driver rmnet_ipa_driver = {
+	.driver = {
+		.name = "rmnet_ipa3",
+		.owner = THIS_MODULE,
+		.pm = &rmnet_ipa_pm_ops,
+		.of_match_table = rmnet_ipa_dt_match,
+	},
+	.probe = ipa3_wwan_probe,
+	.remove = ipa3_wwan_remove,
+};
+
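+/*
+ * Modem SSR notifier: on BEFORE_SHUTDOWN mark SSR in progress, clean up
+ * the Q6 pipes, stop the netdev queue and QMI traffic; on BEFORE_POWERUP
+ * vote for the proxy clock; on AFTER_POWERUP re-register the platform
+ * driver so the WWAN device is re-probed.
+ */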
+static int ipa3_ssr_notifier_cb(struct notifier_block *this,
+			   unsigned long code,
+			   void *data)
+{
+	if (!ipa3_rmnet_ctx.ipa_rmnet_ssr)
+		return NOTIFY_DONE;
+
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n");
+		atomic_set(&rmnet_ipa3_ctx->is_ssr, 1);
+		ipa3_q6_pre_shutdown_cleanup();
+		if (IPA_NETDEV())
+			netif_stop_queue(IPA_NETDEV());
+		ipa3_qmi_stop_workqueues();
+		ipa3_wan_ioctl_stop_qmi_messages();
+		ipa_stop_polling_stats();
+		if (atomic_read(&rmnet_ipa3_ctx->is_initialized))
+			platform_driver_unregister(&rmnet_ipa_driver);
+		IPAWANINFO("IPA BEFORE_SHUTDOWN handling is complete\n");
+		break;
+	case SUBSYS_AFTER_SHUTDOWN:
+		IPAWANINFO("IPA Received MPSS AFTER_SHUTDOWN\n");
+		if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
+			ipa3_q6_post_shutdown_cleanup();
+		IPAWANINFO("IPA AFTER_SHUTDOWN handling is complete\n");
+		break;
+	case SUBSYS_BEFORE_POWERUP:
+		IPAWANINFO("IPA received MPSS BEFORE_POWERUP\n");
+		if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
+			/* clean up cached QMI msg/handlers */
+			ipa3_qmi_service_exit();
+		/*hold a proxy vote for the modem*/
+		ipa3_proxy_clk_vote();
+		IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n");
+		break;
+	case SUBSYS_AFTER_POWERUP:
+		IPAWANINFO("%s:%d IPA received MPSS AFTER_POWERUP\n",
+			__func__, __LINE__);
+		if (!atomic_read(&rmnet_ipa3_ctx->is_initialized) &&
+		       atomic_read(&rmnet_ipa3_ctx->is_ssr))
+			platform_driver_register(&rmnet_ipa_driver);
+
+		IPAWANINFO("IPA AFTER_POWERUP handling is complete\n");
+		break;
+	default:
+		IPAWANDBG("Unsupported subsys notification, IPA received: %lu",
+			code);
+		break;
+	}
+
+	IPAWANDBG_LOW("Exit\n");
+	return NOTIFY_DONE;
+}
+
+/**
+ * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa_send_msg
+ * @buff: pointer to buffer containing the message
+ * @len: message len
+ * @type: message type
+ *
+ * This function is invoked when ipa_send_msg is complete (Provided as a
+ * free function pointer along with the message).
+ */
+static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAWANERR("Null buffer\n");
+		return;
+	}
+
+	if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
+		type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+		IPAWANERR("Wrong type given. buff %p type %d\n",
+			  buff, type);
+	}
+	kfree(buff);
+}
+
+/**
+ * rmnet_ipa_get_stats_and_update() - Gets pipe stats from Modem
+ *
+ * This function queries the IPA Modem driver for the pipe stats
+ * via QMI, and updates the user space IPA entity.
+ */
+static void rmnet_ipa_get_stats_and_update(void)
+{
+	struct ipa_get_data_stats_req_msg_v01 req;
+	struct ipa_get_data_stats_resp_msg_v01 *resp;
+	struct ipa_msg_meta msg_meta;
+	int rc;
+
+	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+		       GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		return;
+	}
+
+	memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+	req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+
+	rc = ipa3_qmi_get_data_stats(&req, resp);
+
+	if (!rc) {
+		memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+		msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+		msg_meta.msg_len =
+			sizeof(struct ipa_get_data_stats_resp_msg_v01);
+		rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+		if (rc) {
+			IPAWANERR("ipa_send_msg failed: %d\n", rc);
+			kfree(resp);
+			return;
+		}
+	}
+}
+
+/**
+ * tethering_stats_poll_queue() - Stats polling function
+ * @work: Work entry
+ *
+ * This function is scheduled periodically (per the interval) in
+ * order to poll the IPA Modem driver for the pipe stats.
+ */
+static void tethering_stats_poll_queue(struct work_struct *work)
+{
+	rmnet_ipa_get_stats_and_update();
+
+	/* Schedule again only if there's an active polling interval */
+	if (ipa3_rmnet_ctx.polling_interval != 0)
+		schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
+			msecs_to_jiffies(ipa3_rmnet_ctx.polling_interval*1000));
+}
+
+/**
+ * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
+ *
+ * This function retrieves the data usage (used quota) from the IPA Modem driver
+ * via QMI, and updates IPA user space entity.
+ */
+static void rmnet_ipa_get_network_stats_and_update(void)
+{
+	struct ipa_get_apn_data_stats_req_msg_v01 req;
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
+	struct ipa_msg_meta msg_meta;
+	int rc;
+
+	resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+		       GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for network stats message\n");
+		return;
+	}
+
+	memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
+
+	req.mux_id_list_valid = true;
+	req.mux_id_list_len = 1;
+	req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id;
+
+	rc = ipa3_qmi_get_network_stats(&req, resp);
+
+	if (!rc) {
+		memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+		msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
+		msg_meta.msg_len =
+			sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
+		rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+		if (rc) {
+			IPAWANERR("ipa_send_msg failed: %d\n", rc);
+			kfree(resp);
+			return;
+		}
+	}
+}
+
+/**
+ * rmnet_ipa3_poll_tethering_stats() - Tethering stats polling IOCTL handler
+ * @data: IOCTL data
+ *
+ * This function handles WAN_IOC_POLL_TETHERING_STATS.
+ * In case the polling interval received is 0, polling will stop (a poll
+ * already in progress is allowed to finish), the network and pipe stats
+ * are fetched once more, and the IPA user space entity is updated.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
+{
+	ipa3_rmnet_ctx.polling_interval = data->polling_interval_secs;
+
+	cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
+
+	if (ipa3_rmnet_ctx.polling_interval == 0) {
+		ipa3_qmi_stop_data_qouta();
+		rmnet_ipa_get_network_stats_and_update();
+		rmnet_ipa_get_stats_and_update();
+		return 0;
+	}
+
+	schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
+	return 0;
+}
+
+/**
+ * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * @data: IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the request of the quota to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+	u32 mux_id;
+	int index;
+	struct ipa_set_data_usage_quota_req_msg_v01 req;
+
+	index = find_vchannel_name_index(data->interface_name);
+	IPAWANERR("iface name %s, quota %lu\n",
+		  data->interface_name,
+		  (unsigned long int) data->quota_mbytes);
+
+	if (index == MAX_NUM_OF_MUX_CHANNEL) {
+		IPAWANERR("%s is an invalid iface name\n",
+			  data->interface_name);
+		return -EFAULT;
+	}
+
+	mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id;
+	ipa3_rmnet_ctx.metered_mux_id = mux_id;
+
+	memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
+	req.apn_quota_list_valid = true;
+	req.apn_quota_list_len = 1;
+	req.apn_quota_list[0].mux_id = mux_id;
+	req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
+
+	return ipa3_qmi_set_data_quota(&req);
+}
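+
+/*
+ * Illustrative user space sketch (an assumption, not part of this driver):
+ * arming a 1 GB quota on an rmnet interface through WAN_IOC_SET_DATA_QUOTA.
+ * Here fd is an open descriptor on the wan_ioctl device node and
+ * "rmnet_data0" is only an example interface name.
+ *
+ *	struct wan_ioctl_set_data_quota quota = { 0 };
+ *
+ *	snprintf(quota.interface_name, sizeof(quota.interface_name),
+ *		 "rmnet_data0");
+ *	quota.quota_mbytes = 1024;
+ *	if (ioctl(fd, WAN_IOC_SET_DATA_QUOTA, &quota) < 0)
+ *		perror("WAN_IOC_SET_DATA_QUOTA");
+ */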
+
+/**
+ * rmnet_ipa3_set_tether_client_pipe() - Tether client pipe setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
+ * It attaches the given tethering client to the supplied lists of UL source
+ * and DL destination pipes, or detaches them when a reset is requested.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa3_set_tether_client_pipe(
+	struct wan_ioctl_set_tether_client_pipe *data)
+{
+	int number, i;
+
+	IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
+	data->ipa_client,
+	data->ul_src_pipe_len,
+	data->dl_dst_pipe_len,
+	data->reset_client);
+	number = data->ul_src_pipe_len;
+	for (i = 0; i < number; i++) {
+		IPAWANDBG("UL index-%d pipe %d\n", i,
+			data->ul_src_pipe_list[i]);
+		if (data->reset_client)
+			ipa3_set_client(data->ul_src_pipe_list[i],
+				0, false);
+		else
+			ipa3_set_client(data->ul_src_pipe_list[i],
+				data->ipa_client, true);
+	}
+	number = data->dl_dst_pipe_len;
+	for (i = 0; i < number; i++) {
+		IPAWANDBG("DL index-%d pipe %d\n", i,
+			data->dl_dst_pipe_list[i]);
+		if (data->reset_client)
+			ipa3_set_client(data->dl_dst_pipe_list[i],
+				0, false);
+		else
+			ipa3_set_client(data->dl_dst_pipe_list[i],
+				data->ipa_client, false);
+	}
+	return 0;
+}
+
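+/**
+ * rmnet_ipa3_query_tethering_stats() - Tethering stats query/reset handler
+ * @data - IOCTL data (may be NULL when @reset is true)
+ * @reset - true to request that the Modem reset its pipe stats
+ *
+ * This function handles WAN_IOC_QUERY_TETHER_STATS and
+ * WAN_IOC_RESET_TETHER_STATS. It fetches the pipe stats from the IPA Modem
+ * driver via QMI and accumulates the UL/DL packet and byte counters that
+ * belong to the tethering client given in @data.
+ *
+ * Return codes:
+ * 0: Success
+ * -ENOMEM: Memory allocation failure
+ * other: See ipa3_qmi_get_data_stats
+ */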
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset)
+{
+	struct ipa_get_data_stats_req_msg_v01 *req;
+	struct ipa_get_data_stats_resp_msg_v01 *resp;
+	int pipe_len, rc;
+
+	req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		return -ENOMEM;
+	}
+	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+			GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		kfree(req);
+		return -ENOMEM;
+	}
+	memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+	req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+	if (reset) {
+		req->reset_stats_valid = true;
+		req->reset_stats = true;
+		IPAWANERR("reset the pipe stats\n");
+	} else {
+		/* print tethered-client enum */
+		IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client);
+	}
+
+	rc = ipa3_qmi_get_data_stats(req, resp);
+	if (rc) {
+		IPAWANERR("can't get ipa_qmi_get_data_stats\n");
+		kfree(req);
+		kfree(resp);
+		return rc;
+	} else if (reset) {
+		kfree(req);
+		kfree(resp);
+		return 0;
+	}
+
+	if (resp->dl_dst_pipe_stats_list_valid) {
+		for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
+			pipe_len++) {
+			IPAWANDBG_LOW("Check entry(%d) dl_dst_pipe(%d)\n",
+				pipe_len, resp->dl_dst_pipe_stats_list
+					[pipe_len].pipe_index);
+			IPAWANDBG_LOW("dl_p_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv4_packets,
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv6_packets);
+			IPAWANDBG_LOW("dl_b_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv4_bytes,
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv6_bytes);
+			if (ipa_get_client_uplink(resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				pipe_index) == false) {
+				if (data->ipa_client == ipa_get_client(resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					pipe_index)) {
+					/* update the DL stats */
+					data->ipv4_rx_packets += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv4_packets;
+					data->ipv6_rx_packets += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv6_packets;
+					data->ipv4_rx_bytes += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv4_bytes;
+					data->ipv6_rx_bytes += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv6_bytes;
+				}
+			}
+		}
+	}
+	IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+		(unsigned long int) data->ipv4_rx_packets,
+		(unsigned long int) data->ipv6_rx_packets,
+		(unsigned long int) data->ipv4_rx_bytes,
+		(unsigned long int) data->ipv6_rx_bytes);
+
+	if (resp->ul_src_pipe_stats_list_valid) {
+		for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
+			pipe_len++) {
+			IPAWANDBG_LOW("Check entry(%d) ul_dst_pipe(%d)\n",
+				pipe_len,
+				resp->ul_src_pipe_stats_list[pipe_len].
+				pipe_index);
+			IPAWANDBG_LOW("ul_p_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv4_packets,
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv6_packets);
+			IPAWANDBG_LOW("ul_b_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv4_bytes,
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv6_bytes);
+			if (ipa_get_client_uplink(resp->
+				ul_src_pipe_stats_list[pipe_len].
+				pipe_index) == true) {
+				if (data->ipa_client == ipa_get_client(resp->
+				ul_src_pipe_stats_list[pipe_len].
+				pipe_index)) {
+					/* update the UL stats */
+					data->ipv4_tx_packets += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv4_packets;
+					data->ipv6_tx_packets += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv6_packets;
+					data->ipv4_tx_bytes += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv4_bytes;
+					data->ipv6_tx_bytes += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv6_bytes;
+				}
+			}
+		}
+	}
+	IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+		(unsigned long int) data->ipv4_tx_packets,
+		(unsigned long int) data->ipv6_tx_packets,
+		(unsigned long int) data->ipv4_tx_bytes,
+		(unsigned long int) data->ipv6_tx_bytes);
+	kfree(req);
+	kfree(resp);
+	return 0;
+}
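+
+/*
+ * Illustrative user space sketch (an assumption, not part of this driver):
+ * reading the accumulated stats for one tethering client through
+ * WAN_IOC_QUERY_TETHER_STATS. Here fd is an open descriptor on the wan_ioctl
+ * device node and IPA_CLIENT_USB_PROD is only an example client; the casts
+ * assume the counters are 64 bits wide.
+ *
+ *	struct wan_ioctl_query_tether_stats stats = { 0 };
+ *
+ *	stats.ipa_client = IPA_CLIENT_USB_PROD;
+ *	if (ioctl(fd, WAN_IOC_QUERY_TETHER_STATS, &stats) == 0)
+ *		printf("rx bytes v4=%llu v6=%llu, tx bytes v4=%llu v6=%llu\n",
+ *		       (unsigned long long)stats.ipv4_rx_bytes,
+ *		       (unsigned long long)stats.ipv6_rx_bytes,
+ *		       (unsigned long long)stats.ipv4_tx_bytes,
+ *		       (unsigned long long)stats.ipv6_tx_bytes);
+ */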
+
+/**
+ * ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
+ * @mux_id - The MUX ID on which the quota has been reached
+ *
+ * This function broadcasts a Netlink event using the kobject of the
+ * rmnet_ipa interface in order to alert the user space that the quota
+ * on the specific interface which matches the mux_id has been reached.
+ *
+ */
+void ipa3_broadcast_quota_reach_ind(u32 mux_id)
+{
+	char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
+	char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+	char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+	char *envp[IPA_UEVENT_NUM_EVNP] = {
+		alert_msg, iface_name_l, iface_name_m, NULL };
+	int res;
+	int index;
+
+	index = ipa3_find_mux_channel_index(mux_id);
+
+	if (index == MAX_NUM_OF_MUX_CHANNEL) {
+		IPAWANERR("%u is an mux ID\n", mux_id);
+		return;
+	}
+
+	res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
+			"ALERT_NAME=%s", "quotaReachedAlert");
+	if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
+		IPAWANERR("message too long (%d)", res);
+		return;
+	}
+	/* posting msg for L-release for CNE */
+	res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+	    "UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+		IPAWANERR("message too long (%d)", res);
+		return;
+	}
+	/* posting msg for M-release for CNE */
+	res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+	    "INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+		IPAWANERR("message too long (%d)", res);
+		return;
+	}
+
+	IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
+		alert_msg, iface_name_l, iface_name_m);
+	kobject_uevent_env(&(IPA_NETDEV()->dev.kobj),
+		KOBJ_CHANGE, envp);
+}
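+
+/*
+ * Illustrative user space note (an assumption, not part of this driver): the
+ * broadcast above is delivered as a kobject uevent, so a listener would
+ * typically open a NETLINK_KOBJECT_UEVENT socket, bind it with nl_groups = 1,
+ * and scan each received, NUL-separated uevent buffer for the variables
+ * emitted here, i.e. "ALERT_NAME=quotaReachedAlert" together with the
+ * "UPSTREAM="/"INTERFACE=" strings, to learn which interface exhausted its
+ * quota.
+ */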
+
+/**
+ * ipa3_q6_handshake_complete() - Perform operations once Q6 is up
+ * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
+ *
+ * This function is invoked once the handshake between the IPA AP driver
+ * and IPA Q6 driver is complete. At this point, it is possible to perform
+ * operations which can't be performed until IPA Q6 driver is up.
+ *
+ */
+void ipa3_q6_handshake_complete(bool ssr_bootup)
+{
+	/* It is required to recover the network stats after SSR recovery */
+	if (ssr_bootup) {
+		/*
+		 * In case the uC is required to be loaded by the Modem,
+		 * the proxy vote will be removed only when uC loading is
+		 * complete and indication is received by the AP. After SSR,
+		 * uC is already loaded. Therefore, proxy vote can be removed
+		 * once Modem init is complete.
+		 */
+		ipa3_proxy_clk_unvote();
+
+		/*
+		 * It is required to recover the network stats after
+		 * SSR recovery
+		 */
+		rmnet_ipa_get_network_stats_and_update();
+	}
+}
+
+static int __init ipa3_wwan_init(void)
+{
+	rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
+	if (!rmnet_ipa3_ctx) {
+		IPAWANERR("no memory\n");
+		return -ENOMEM;
+	}
+
+	atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
+	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+
+	mutex_init(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
+	/* Register for Modem SSR */
+	rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
+			SUBSYS_MODEM,
+			&ipa3_ssr_notifier);
+	if (!IS_ERR(rmnet_ipa3_ctx->subsys_notify_handle))
+		return platform_driver_register(&rmnet_ipa_driver);
+	else
+		return (int)PTR_ERR(rmnet_ipa3_ctx->subsys_notify_handle);
+}
+
+static void __exit ipa3_wwan_cleanup(void)
+{
+	int ret;
+
+	mutex_destroy(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+	ret = subsys_notif_unregister_notifier(
+		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
+	if (ret)
+		IPAWANERR(
+		"Error subsys_notif_unregister_notifier system %s, ret=%d\n",
+		SUBSYS_MODEM, ret);
+	platform_driver_unregister(&rmnet_ipa_driver);
+	kfree(rmnet_ipa3_ctx);
+	rmnet_ipa3_ctx = NULL;
+}
+
+static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff)
+		IPAWANERR("Null buffer.\n");
+	kfree(buff);
+}
+
+static void ipa3_rmnet_rx_cb(void *priv)
+{
+	IPAWANDBG_LOW("\n");
+	napi_schedule(&(rmnet_ipa3_ctx->wwan_priv->napi));
+}
+
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget)
+{
+	int rcvd_pkts = 0;
+
+	rcvd_pkts = ipa_rx_poll(rmnet_ipa3_ctx->ipa3_to_apps_hdl,
+					NAPI_WEIGHT);
+	IPAWANDBG_LOW("rcvd packets: %d\n", rcvd_pkts);
+	return rcvd_pkts;
+}
+
+late_initcall(ipa3_wwan_init);
+module_exit(ipa3_wwan_cleanup);
+MODULE_DESCRIPTION("WWAN Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
new file mode 100644
index 0000000..80b07ab
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -0,0 +1,391 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include "ipa_qmi_service.h"
+
+#define DRIVER_NAME "wwan_ioctl"
+
+#ifdef CONFIG_COMPAT
+#define WAN_IOC_ADD_FLT_RULE32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_RULE, \
+		compat_uptr_t)
+#define WAN_IOC_ADD_FLT_RULE_INDEX32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_INDEX, \
+		compat_uptr_t)
+#define WAN_IOC_POLL_TETHERING_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_POLL_TETHERING_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_SET_DATA_QUOTA32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_DATA_QUOTA, \
+		compat_uptr_t)
+#define WAN_IOC_SET_TETHER_CLIENT_PIPE32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \
+		compat_uptr_t)
+#define WAN_IOC_QUERY_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_TETHER_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_RESET_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_RESET_TETHER_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_DL_FILTER_STATS, \
+		compat_uptr_t)
+#endif
+
+static unsigned int dev_num = 1;
+static struct cdev ipa3_wan_ioctl_cdev;
+static unsigned int ipa3_process_ioctl = 1;
+static struct class *class;
+static dev_t device;
+
+static long ipa3_wan_ioctl(struct file *filp,
+		unsigned int cmd,
+		unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	u8 *param = NULL;
+
+	IPAWANDBG("device %s got ioctl events :>>>\n",
+		DRIVER_NAME);
+
+	if (!ipa3_process_ioctl) {
+		IPAWANDBG("modem is in SSR, ignoring ioctl\n");
+		return -EAGAIN;
+	}
+
+	switch (cmd) {
+	case WAN_IOC_ADD_FLT_RULE:
+		IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n",
+		DRIVER_NAME);
+		pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_qmi_filter_request_send(
+			(struct ipa_install_fltr_rule_req_msg_v01 *)param)) {
+			IPAWANDBG("IPACM->Q6 add filter rule failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_ADD_FLT_RULE_INDEX:
+		IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
+		DRIVER_NAME);
+		pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_qmi_filter_notify_send(
+		(struct ipa_fltr_installed_notif_req_msg_v01 *)param)) {
+			IPAWANDBG("IPACM->Q6 rule index fail\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_VOTE_FOR_BW_MBPS:
+		IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n",
+		DRIVER_NAME);
+		pyld_sz = sizeof(uint32_t);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_vote_for_bus_bw((uint32_t *)param)) {
+			IPAWANERR("Failed to vote for bus BW\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_POLL_TETHERING_STATS:
+		IPAWANDBG_LOW("got WAN_IOCTL_POLL_TETHERING_STATS :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_poll_tethering_stats(
+		(struct wan_ioctl_poll_tethering_stats *)param)) {
+			IPAWANERR("WAN_IOCTL_POLL_TETHERING_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SET_DATA_QUOTA:
+		IPAWANDBG_LOW("got WAN_IOCTL_SET_DATA_QUOTA :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_set_data_quota);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_set_data_quota(
+		(struct wan_ioctl_set_data_quota *)param)) {
+			IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SET_TETHER_CLIENT_PIPE:
+		IPAWANDBG_LOW("got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_set_tether_client_pipe);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_set_tether_client_pipe(
+			(struct wan_ioctl_set_tether_client_pipe *)param)) {
+			IPAWANERR("WAN_IOC_SET_TETHER_CLIENT_PIPE failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_QUERY_TETHER_STATS:
+		IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_query_tether_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (rmnet_ipa3_query_tethering_stats(
+			(struct wan_ioctl_query_tether_stats *)param, false)) {
+			IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_RESET_TETHER_STATS:
+		IPAWANDBG_LOW("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n",
+				DRIVER_NAME);
+		pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (rmnet_ipa3_query_tethering_stats(NULL, true)) {
+			IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	default:
+		retval = -ENOTTY;
+	}
+	kfree(param);
+	return retval;
+}
+
+#ifdef CONFIG_COMPAT
+long ipa3_compat_wan_ioctl(struct file *file,
+		unsigned int cmd,
+		unsigned long arg)
+{
+	switch (cmd) {
+	case WAN_IOC_ADD_FLT_RULE32:
+		cmd = WAN_IOC_ADD_FLT_RULE;
+		break;
+	case WAN_IOC_ADD_FLT_RULE_INDEX32:
+		cmd = WAN_IOC_ADD_FLT_RULE_INDEX;
+		break;
+	case WAN_IOC_POLL_TETHERING_STATS32:
+		cmd = WAN_IOC_POLL_TETHERING_STATS;
+		break;
+	case WAN_IOC_SET_DATA_QUOTA32:
+		cmd = WAN_IOC_SET_DATA_QUOTA;
+		break;
+	case WAN_IOC_SET_TETHER_CLIENT_PIPE32:
+		cmd = WAN_IOC_SET_TETHER_CLIENT_PIPE;
+		break;
+	case WAN_IOC_QUERY_TETHER_STATS32:
+		cmd = WAN_IOC_QUERY_TETHER_STATS;
+		break;
+	case WAN_IOC_RESET_TETHER_STATS32:
+		cmd = WAN_IOC_RESET_TETHER_STATS;
+		break;
+	case WAN_IOC_QUERY_DL_FILTER_STATS32:
+		cmd = WAN_IOC_QUERY_DL_FILTER_STATS;
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return ipa3_wan_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static int ipa3_wan_ioctl_open(struct inode *inode, struct file *filp)
+{
+	IPAWANDBG("\n IPA A7 ipa3_wan_ioctl open OK :>>>> ");
+	return 0;
+}
+
+const struct file_operations rmnet_ipa3_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa3_wan_ioctl_open,
+	.read = NULL,
+	.unlocked_ioctl = ipa3_wan_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = ipa3_compat_wan_ioctl,
+#endif
+};
+
+int ipa3_wan_ioctl_init(void)
+{
+	unsigned int wan_ioctl_major = 0;
+	int ret;
+	struct device *dev;
+
+	device = MKDEV(wan_ioctl_major, 0);
+
+	ret = alloc_chrdev_region(&device, 0, dev_num, DRIVER_NAME);
+	if (ret) {
+		IPAWANERR(":device_alloc err.\n");
+		goto dev_alloc_err;
+	}
+	wan_ioctl_major = MAJOR(device);
+
+	class = class_create(THIS_MODULE, DRIVER_NAME);
+	if (IS_ERR(class)) {
+		IPAWANERR(":class_create err.\n");
+		goto class_err;
+	}
+
+	dev = device_create(class, NULL, device,
+		NULL, DRIVER_NAME);
+	if (IS_ERR(dev)) {
+		IPAWANERR(":device_create err.\n");
+		goto device_err;
+	}
+
+	cdev_init(&ipa3_wan_ioctl_cdev, &rmnet_ipa3_fops);
+	ret = cdev_add(&ipa3_wan_ioctl_cdev, device, dev_num);
+	if (ret) {
+		IPAWANERR(":cdev_add err.\n");
+		goto cdev_add_err;
+	}
+
+	ipa3_process_ioctl = 1;
+
+	IPAWANDBG("IPA %s major(%d) initial ok :>>>>\n",
+	DRIVER_NAME, wan_ioctl_major);
+	return 0;
+
+cdev_add_err:
+	device_destroy(class, device);
+device_err:
+	class_destroy(class);
+class_err:
+	unregister_chrdev_region(device, dev_num);
+dev_alloc_err:
+	return -ENODEV;
+}
+
+void ipa3_wan_ioctl_stop_qmi_messages(void)
+{
+	ipa3_process_ioctl = 0;
+}
+
+void ipa3_wan_ioctl_enable_qmi_messages(void)
+{
+	ipa3_process_ioctl = 1;
+}
+
+void ipa3_wan_ioctl_deinit(void)
+{
+	cdev_del(&ipa3_wan_ioctl_cdev);
+	device_destroy(class, device);
+	class_destroy(class);
+	unregister_chrdev_region(device, dev_num);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
new file mode 100644
index 0000000..3ed3e44
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -0,0 +1,253 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipa.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge"
+
+#define TETH_DBG(fmt, args...) \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \
+		 __func__, __LINE__, ## args)
+#define TETH_DBG_FUNC_ENTRY() \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__)
+#define TETH_DBG_FUNC_EXIT() \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
+#define TETH_ERR(fmt, args...) \
+	pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+/**
+ * struct ipa3_teth_bridge_ctx - Tethering bridge driver context information
+ * @class: kernel class pointer
+ * @dev_num: kernel device number
+ * @dev: kernel device struct pointer
+ * @cdev: kernel character device struct
+ */
+struct ipa3_teth_bridge_ctx {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+};
+static struct ipa3_teth_bridge_ctx *ipa3_teth_ctx;
+
+/**
+* teth_bridge_ipa_cb() - Callback to handle IPA data path events
+* @priv - private data
+* @evt - event type
+* @data - event specific data (usually skb)
+*
+* This callback is called by IPA driver for exception packets from USB.
+* All exception packets are handled by Q6 and should not reach this function.
+* Packets arrive at the AP exception pipe only when they are sent from USB
+* before Q6 has set up the call.
+*/
+static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+
+	TETH_DBG_FUNC_ENTRY();
+	if (evt != IPA_RECEIVE) {
+		TETH_ERR("unexpected event %d\n", evt);
+		WARN_ON(1);
+		return;
+	}
+
+	TETH_ERR("Unexpected exception packet from USB, dropping packet\n");
+	dev_kfree_skb_any(skb);
+	TETH_DBG_FUNC_EXIT();
+}
+
+/**
+* ipa3_teth_bridge_init() - Initialize the Tethering bridge driver
+* @params - in/out params for USB initialization API (please look at struct
+*  definition for more info)
+*
+* The USB driver gets a pointer to a callback function (usb_notify_cb) and
+* associated private data, and installs this callback in its call to
+* ipa3_connect().
+*
+* The IPA resource manager dependency graph is built later, in
+* ipa3_teth_bridge_connect().
+*
+* Return codes: 0: success,
+*		-EINVAL - Bad parameter
+*		Other negative value - Failure
+*/
+int ipa3_teth_bridge_init(struct teth_bridge_init_params *params)
+{
+	TETH_DBG_FUNC_ENTRY();
+
+	if (!params) {
+		TETH_ERR("Bad parameter\n");
+		TETH_DBG_FUNC_EXIT();
+		return -EINVAL;
+	}
+
+	params->usb_notify_cb = teth_bridge_ipa_cb;
+	params->private_data = NULL;
+	params->skip_ep_cfg = true;
+
+	TETH_DBG_FUNC_EXIT();
+	return 0;
+}
+
+/**
+* ipa3_teth_bridge_disconnect() - Disconnect tethering bridge module
+*/
+int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
+{
+	TETH_DBG_FUNC_ENTRY();
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+				 IPA_RM_RESOURCE_Q6_CONS);
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+				 IPA_RM_RESOURCE_USB_CONS);
+	TETH_DBG_FUNC_EXIT();
+
+	return 0;
+}
+
+/**
+* ipa3_teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+* @connect_params:	Connection info
+*
+* Return codes: 0: success
+*		-EINVAL: invalid parameters
+*		-EPERM: Operation not permitted as the bridge is already
+*		connected
+*/
+int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+	int res = 0;
+
+	TETH_DBG_FUNC_ENTRY();
+
+	/* Build the dependency graph; the first add_dependency call is
+	 * synchronous in order to make sure the IPA clocks are up before we
+	 * continue and notify the USB driver that it may proceed.
+	 */
+	res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
+				    IPA_RM_RESOURCE_Q6_CONS);
+	if (res < 0) {
+		TETH_ERR("ipa_rm_add_dependency() failed.\n");
+		goto bail;
+	}
+
+	/* This add_dependency call can't be synchronous since it would block
+	 * until the USB resource is connected (which can happen only after
+	 * the tethering bridge is connected). The clocks are already up, so
+	 * the call doesn't need to block.
+	 */
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+				    IPA_RM_RESOURCE_USB_CONS);
+	if (res < 0 && res != -EINPROGRESS) {
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+					IPA_RM_RESOURCE_Q6_CONS);
+		TETH_ERR("ipa_rm_add_dependency() failed.\n");
+		goto bail;
+	}
+
+	res = 0;
+
+bail:
+	TETH_DBG_FUNC_EXIT();
+	return res;
+}
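+
+/*
+ * Illustrative call flow sketch (an assumption, not part of this driver):
+ * a peripheral driver (for example the USB RNDIS/MBIM function driver) is
+ * expected to use the bridge roughly as follows; IPA_CLIENT_USB_PROD is only
+ * an example client type.
+ *
+ *	struct teth_bridge_init_params init_params;
+ *	struct teth_bridge_connect_params connect_params;
+ *
+ *	memset(&init_params, 0, sizeof(init_params));
+ *	memset(&connect_params, 0, sizeof(connect_params));
+ *
+ *	ipa3_teth_bridge_init(&init_params);
+ *	(program the USB<->IPA pipes, passing init_params.usb_notify_cb and
+ *	 init_params.private_data to ipa3_connect())
+ *	ipa3_teth_bridge_connect(&connect_params);
+ *	(tethered call active; on teardown:)
+ *	ipa3_teth_bridge_disconnect(IPA_CLIENT_USB_PROD);
+ */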
+
+static long ipa3_teth_bridge_ioctl(struct file *filp,
+			      unsigned int cmd,
+			      unsigned long arg)
+{
+	IPAERR("No ioctls are supported!\n");
+	return -ENOIOCTLCMD;
+}
+
+static const struct file_operations ipa3_teth_bridge_drv_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = ipa3_teth_bridge_ioctl,
+};
+
+/**
+* ipa3_teth_bridge_driver_init() - Initialize tethering bridge driver
+*
+*/
+int ipa3_teth_bridge_driver_init(void)
+{
+	int res;
+
+	TETH_DBG("Tethering bridge driver init\n");
+	ipa3_teth_ctx = kzalloc(sizeof(*ipa3_teth_ctx), GFP_KERNEL);
+	if (!ipa3_teth_ctx) {
+		TETH_ERR("kzalloc err.\n");
+		return -ENOMEM;
+	}
+
+	ipa3_teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME);
+	if (IS_ERR(ipa3_teth_ctx->class)) {
+		TETH_ERR(":class_create err.\n");
+		res = -ENODEV;
+		goto fail_class_create;
+	}
+
+	res = alloc_chrdev_region(&ipa3_teth_ctx->dev_num, 0, 1,
+				  TETH_BRIDGE_DRV_NAME);
+	if (res) {
+		TETH_ERR("alloc_chrdev_region err.\n");
+		res = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	ipa3_teth_ctx->dev = device_create(ipa3_teth_ctx->class,
+			NULL,
+			ipa3_teth_ctx->dev_num,
+			ipa3_teth_ctx,
+			TETH_BRIDGE_DRV_NAME);
+	if (IS_ERR(ipa3_teth_ctx->dev)) {
+		TETH_ERR(":device_create err.\n");
+		res = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&ipa3_teth_ctx->cdev, &ipa3_teth_bridge_drv_fops);
+	ipa3_teth_ctx->cdev.owner = THIS_MODULE;
+	ipa3_teth_ctx->cdev.ops = &ipa3_teth_bridge_drv_fops;
+
+	res = cdev_add(&ipa3_teth_ctx->cdev, ipa3_teth_ctx->dev_num, 1);
+	if (res) {
+		TETH_ERR(":cdev_add err=%d\n", -res);
+		res = -ENODEV;
+		goto fail_cdev_add;
+	}
+	TETH_DBG("Tethering bridge driver init OK\n");
+
+	return 0;
+fail_cdev_add:
+	device_destroy(ipa3_teth_ctx->class, ipa3_teth_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(ipa3_teth_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	class_destroy(ipa3_teth_ctx->class);
+fail_class_create:
+	kfree(ipa3_teth_ctx);
+	ipa3_teth_ctx = NULL;
+
+	return res;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Tethering bridge driver");