msm: ipa: add driver for the IPA core

The IPA (Internet Protocol Accelerator) driver provides an API for
interacting with the IPA HW.
Kernel and user-space processes can call the IPA driver
to configure the IPA.
Kernel processes can also use the IPA driver as a data
transport for traffic to/from apps, including exception traffic.
Exception traffic is any traffic that cannot be handled by the IPA HW
or that is specially marked through SW configuration.
Such traffic is always sent to apps for further processing.
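
For example, a kernel client could set up a system pipe and exchange
packets with the IPA core roughly as follows (illustrative sketch only;
my_notify, my_rx_handler and the FIFO size are placeholders):

	static void my_notify(void *priv, enum ipa_dp_evt_type evt,
			      unsigned long data)
	{
		struct sk_buff *skb = (struct sk_buff *)data;

		if (evt == IPA_RECEIVE)
			my_rx_handler(skb);	/* consume received skb */
		else if (evt == IPA_WRITE_DONE)
			dev_kfree_skb_any(skb);	/* tx skb can be freed */
	}

	struct ipa_sys_connect_params sys_in;
	u32 clnt_hdl;

	memset(&sys_in, 0, sizeof(sys_in));
	sys_in.client = IPA_CLIENT_A5_LAN_WAN_CONS;
	sys_in.desc_fifo_sz = 0x800;
	sys_in.notify = my_notify;
	if (ipa_setup_sys_pipe(&sys_in, &clnt_hdl))
		pr_err("sys pipe setup failed\n");

Packets are handed to the IPA core with ipa_tx_dp(), and received or
exception packets are delivered through the IPA_RECEIVE event above.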

Change-Id: Ia940176536079a7cf7a78f49b5ea82288a0638d4
Signed-off-by: Talel Atias <tatias@codeaurora.org>
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
new file mode 100644
index 0000000..86c60e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -0,0 +1,81 @@
+Qualcomm Internet Packet Accelerator
+
+Internet Packet Accelerator (IPA) is a programmable protocol
+processor HW block. It is designed to support generic HW processing
+of UL/DL IP packets for various use cases independent of radio technology.
+
+Required properties:
+
+IPA node:
+
+- compatible : "qcom,ipa"
+- reg: Specifies the base physical addresses and the sizes of the IPA
+       registers.
+- reg-names: "ipa-base" - string to identify the IPA CORE base registers.
+	     "bam-base" - string to identify the IPA BAM base registers.
+- interrupts: Specifies the interrupt associated with IPA.
+- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
+                   "bam-irq" - string to identify the IPA BAM interrupt.
+
+IPA pipe sub nodes (A2 static pipes configurations):
+
+- label: two labels are supported, "a2-to-ipa" and "ipa-to-a2", which
+         supply the static configuration for the A2-IPA connection.
+- qcom,src-bam-physical-address: The physical address of the source BAM
+- qcom,ipa-bam-mem-type: The memory type:
+                         0 (Pipe memory), 1 (Private memory), 2 (System memory)
+- qcom,src-bam-pipe-index: Source pipe index
+- qcom,dst-bam-physical-address: The physical address of the
+                                 destination BAM
+- qcom,dst-bam-pipe-index: Destination pipe index
+- qcom,data-fifo-offset: Data FIFO base offset
+- qcom,data-fifo-size: Data FIFO size (bytes)
+- qcom,descriptor-fifo-offset: Descriptor FIFO base offset
+- qcom,descriptor-fifo-size: Descriptor FIFO size (bytes)
+
+Optional properties:
+- qcom,ipa-pipe-mem: Specifies the base physical address and the
+                     size of the IPA pipe memory region.
+                     Pipe memory is a feature which may be supported by the
+                     target (HW platform). The driver supports using pipe
+                     memory instead of system memory. If this property
+                     does not appear in the IPA DTS entry, the driver will
+                     use system memory.
+
+Example:
+
+qcom,ipa@fd4c0000 {
+	compatible = "qcom,ipa";
+	reg = <0xfd4c0000 0x26000>,
+	      <0xfd4c4000 0x14818>;
+	reg-names = "ipa-base", "bam-base";
+	interrupts = <0 252 0>,
+	             <0 253 0>;
+	interrupt-names = "ipa-irq", "bam-irq";
+
+	qcom,pipe1 {
+		label = "a2-to-ipa";
+		qcom,src-bam-physical-address = <0xfc834000>;
+		qcom,ipa-bam-mem-type = <0>;
+		qcom,src-bam-pipe-index = <1>;
+		qcom,dst-bam-physical-address = <0xfd4c0000>;
+		qcom,dst-bam-pipe-index = <6>;
+		qcom,data-fifo-offset = <0x1000>;
+		qcom,data-fifo-size = <0xd00>;
+		qcom,descriptor-fifo-offset = <0x1d00>;
+		qcom,descriptor-fifo-size = <0x300>;
+	};
+
+	qcom,pipe2 {
+		label = "ipa-to-a2";
+		qcom,src-bam-physical-address = <0xfd4c0000>;
+		qcom,ipa-bam-mem-type = <0>;
+		qcom,src-bam-pipe-index = <7>;
+		qcom,dst-bam-physical-address = <0xfc834000>;
+		qcom,dst-bam-pipe-index = <0>;
+		qcom,data-fifo-offset = <0x00>;
+		qcom,data-fifo-size = <0xd00>;
+		qcom,descriptor-fifo-offset = <0xd00>;
+		qcom,descriptor-fifo-size = <0x300>;
+	};
+};
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
new file mode 100644
index 0000000..0f689ac
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -0,0 +1,458 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/msm_ipa.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <mach/sps.h>
+
+/**
+ * enum ipa_nat_en_type - NAT setting type in IPA end-point
+ */
+enum ipa_nat_en_type {
+	IPA_BYPASS_NAT,
+	IPA_SRC_NAT,
+	IPA_DST_NAT,
+};
+
+/**
+ * enum ipa_mode_type - mode setting type in IPA end-point
+ * @IPA_BASIC: basic mode
+ * @IPA_ENABLE_FRAMING_HDLC: not currently supported
+ * @IPA_ENABLE_DEFRAMING_HDLC: not currently supported
+ * @IPA_DMA: DMA mode - packets are forwarded directly to the destination
+ *	     pipe configured in struct ipa_ep_cfg_mode
+ */
+enum ipa_mode_type {
+	IPA_BASIC,
+	IPA_ENABLE_FRAMING_HDLC,
+	IPA_ENABLE_DEFRAMING_HDLC,
+	IPA_DMA,
+};
+
+/**
+ *  enum ipa_aggr_en_type - aggregation setting type in IPA
+ *  end-point
+ */
+enum ipa_aggr_en_type {
+	IPA_BYPASS_AGGR,
+	IPA_ENABLE_AGGR,
+	IPA_ENABLE_DEAGGR,
+};
+
+/**
+ *  enum ipa_aggr_type - type of aggregation in IPA end-point
+ */
+enum ipa_aggr_type {
+	IPA_MBIM_16,
+	IPA_MBIM_32,
+	IPA_TLP,
+};
+
+/**
+ * enum ipa_aggr_mode - global aggregation mode
+ */
+enum ipa_aggr_mode {
+	IPA_MBIM,
+	IPA_QCNCM,
+};
+
+/**
+ * enum ipa_dp_evt_type - type of event client callback is
+ * invoked for on data path
+ * @IPA_RECEIVE: data is struct sk_buff
+ * @IPA_WRITE_DONE: data is struct sk_buff
+ */
+enum ipa_dp_evt_type {
+	IPA_RECEIVE,
+	IPA_WRITE_DONE,
+};
+
+/**
+ * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
+ * @nat_en:	This defines the default NAT mode for the pipe: in case of
+ *		filter miss - the default NAT mode defines the NATing operation
+ *		on the packet. Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_nat {
+	enum ipa_nat_en_type nat_en;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr - header configuration in IPA end-point
+ * @hdr_len:	Header length in bytes to be added/removed. Assuming header len
+ *		is constant per endpoint. Valid for both Input and Output Pipes
+ * @hdr_ofst_metadata_valid:	0: Metadata_Ofst  value is invalid, i.e., no
+ *				metadata within header.
+ *				1: Metadata_Ofst  value is valid, i.e., metadata
+ *				within header is in offset Metadata_Ofst Valid
+ *				for Input Pipes only (IPA Consumer) (for output
+ *				pipes, metadata already set within the header)
+ * @hdr_ofst_metadata:	Offset within header in which metadata resides.
+ *			Size of metadata - 4 bytes
+ *			Example - Stream ID/SSID/mux ID.
+ *			Valid for Input Pipes only (IPA Consumer) (for output
+ *			pipes, metadata already set within the header)
+ * @hdr_additional_const_len:	Defines the constant length that should be added
+ *				to the payload length in order for IPA to update
+ *				correctly the length field within the header
+ *				(valid only in case Hdr_Ofst_Pkt_Size_Valid=1)
+ *				Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size_valid:	0: Hdr_Ofst_Pkt_Size  value is invalid, i.e., no
+ *				length field within the inserted header
+ *				1: Hdr_Ofst_Pkt_Size  value is valid, i.e., a
+ *				packet length field resides within the header
+ *				Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size:	Offset within header in which packet size resides. Upon
+ *			Header Insertion, IPA will update this field within the
+ *			header with the packet length. Assumption is that the
+ *			header length field size is constant and is 2 bytes
+ *			Valid for Output Pipes (IPA Producer)
+ * @hdr_a5_mux:	Determines whether A5 Mux header should be added to the packet.
+ *		This bit is valid only when Hdr_En=01(Header Insertion)
+ *		SW should set this bit for IPA-to-A5 pipes.
+ *		0: Do not insert A5 Mux Header
+ *		1: Insert A5 Mux Header
+ *		Valid for Output Pipes (IPA Producer)
+ */
+struct ipa_ep_cfg_hdr {
+	u32 hdr_len;
+	u32 hdr_ofst_metadata_valid;
+	u32 hdr_ofst_metadata;
+	u32 hdr_additional_const_len;
+	u32 hdr_ofst_pkt_size_valid;
+	u32 hdr_ofst_pkt_size;
+	u32 hdr_a5_mux;
+};
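+
+/*
+ * Illustrative sketch only (values are made up): an input pipe whose 8 byte
+ * header carries a 4 byte mux-ID at offset 0 could be described as:
+ *
+ *	struct ipa_ep_cfg_hdr hdr_cfg = {
+ *		.hdr_len = 8,
+ *		.hdr_ofst_metadata_valid = 1,
+ *		.hdr_ofst_metadata = 0,
+ *	};
+ *
+ * An output pipe inserting a header with a 2 byte length field at offset 6
+ * would instead set .hdr_ofst_pkt_size_valid = 1 and .hdr_ofst_pkt_size = 6.
+ */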
+
+/**
+ * struct ipa_ep_cfg_mode - mode configuration in IPA end-point
+ * @mode:	Valid for Input Pipes only (IPA Consumer)
+ * @dst:	This parameter specifies the output pipe to which the packets
+ *		will be routed to.
+ *		This parameter is valid for Mode=DMA and not valid for
+ *		Mode=Basic
+ *		Valid for Input Pipes only (IPA Consumer)
+ */
+struct ipa_ep_cfg_mode {
+	enum ipa_mode_type mode;
+	enum ipa_client_type dst;
+};
+
+/**
+ * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point
+ * @aggr_en:	Valid for both Input and Output Pipes
+ * @aggr:	Valid for both Input and Output Pipes
+ * @aggr_byte_limit:	Limit of aggregated packet size in KB (<=32KB). When set
+ *			to 0, there is no size limitation on the aggregation.
+ *			When both Aggr_Byte_Limit and Aggr_Time_Limit are set
+ *			to 0, there is no aggregation; every packet is sent
+ *			independently according to the aggregation structure
+ *			Valid for Output Pipes only (IPA Producer)
+ * @aggr_time_limit:	Timer to close an aggregated packet (<=32ms). When set
+ *			to 0, there is no time limitation on the aggregation.
+ *			When both Aggr_Byte_Limit and Aggr_Time_Limit are set to
+ *			0, there is no aggregation; every packet is sent
+ *			independently according to the aggregation structure
+ *			Valid for Output Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_aggr {
+	enum ipa_aggr_en_type aggr_en;
+	enum ipa_aggr_type aggr;
+	u32 aggr_byte_limit;
+	u32 aggr_time_limit;
+};
+
+/**
+ * struct ipa_ep_cfg_route - route configuration in IPA end-point
+ * @rt_tbl_hdl:	Defines the default routing table index to be used in case
+ *		there is no matching filter rule, valid for Input Pipes only
+ *		(IPA Consumer). Clients should set this to 0, which causes the
+ *		default v4 and v6 routes, set up internally by the IPA driver,
+ *		to be used for this end-point
+ */
+struct ipa_ep_cfg_route {
+	u32 rt_tbl_hdl;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat:	NAT parameters
+ * @hdr:	Header parameters
+ * @mode:	Mode parameters
+ * @aggr:	Aggregation parameters
+ * @route:	Routing parameters
+ */
+struct ipa_ep_cfg {
+	struct ipa_ep_cfg_nat nat;
+	struct ipa_ep_cfg_hdr hdr;
+	struct ipa_ep_cfg_mode mode;
+	struct ipa_ep_cfg_aggr aggr;
+	struct ipa_ep_cfg_route route;
+};
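+
+/*
+ * Illustrative sketch only (clnt_hdl stands for a handle returned by the
+ * connect API): a client could place an end-point in DMA mode towards the
+ * USB consumer roughly as follows:
+ *
+ *	struct ipa_ep_cfg cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.mode.mode = IPA_DMA;
+ *	cfg.mode.dst = IPA_CLIENT_USB_CONS;
+ *	if (ipa_cfg_ep(clnt_hdl, &cfg))
+ *		pr_err("EP configuration failed\n");
+ */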
+
+/**
+ * struct ipa_connect_params - low-level client connect input parameters. Either
+ * the client allocates the data and desc FIFOs and specifies them in @data and
+ * @desc, OR the client specifies the sizes and pipe_mem preference and the IPA
+ * driver does the allocation.
+ *
+ * @ipa_ep_cfg:	IPA EP configuration
+ * @client:	type of "client"
+ * @client_bam_hdl:	 client SPS handle
+ * @client_ep_idx:	 client PER EP index
+ * @priv:	callback cookie
+ * @notify:	callback
+ *		priv - callback cookie
+ *		evt - type of event
+ *		data - data relevant to event. May not be valid. See event_type
+ *		enum for valid cases.
+ * @desc_fifo_sz:	size of desc FIFO
+ * @data_fifo_sz:	size of data FIFO
+ * @pipe_mem_preferred:	if true, try to alloc the FIFOs in pipe mem, fallback
+ *			to sys mem if pipe mem alloc fails
+ * @desc:	desc FIFO meta-data when client has allocated it
+ * @data:	data FIFO meta-data when client has allocated it
+ */
+struct ipa_connect_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	u32 client_bam_hdl;
+	u32 client_ep_idx;
+	void *priv;
+	void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+			unsigned long data);
+	u32 desc_fifo_sz;
+	u32 data_fifo_sz;
+	bool pipe_mem_preferred;
+	struct sps_mem_buffer desc;
+	struct sps_mem_buffer data;
+};
+
+/**
+ *  struct ipa_sps_params - SPS related output parameters resulting from
+ *  low/high level client connect
+ *  @ipa_bam_hdl:	IPA SPS handle
+ *  @ipa_ep_idx:	IPA PER EP index
+ *  @desc:	desc FIFO meta-data
+ *  @data:	data FIFO meta-data
+ */
+struct ipa_sps_params {
+	u32 ipa_bam_hdl;
+	u32 ipa_ep_idx;
+	struct sps_mem_buffer desc;
+	struct sps_mem_buffer data;
+};
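+
+/*
+ * Illustrative sketch only (sizes, index and a2_bam_hdl are placeholders): a
+ * low-level client may let the IPA driver allocate the FIFOs by passing only
+ * their sizes and a pipe memory preference:
+ *
+ *	struct ipa_connect_params in;
+ *	struct ipa_sps_params out;
+ *	u32 hdl;
+ *
+ *	memset(&in, 0, sizeof(in));
+ *	in.client = IPA_CLIENT_A2_TETHERED_PROD;
+ *	in.client_bam_hdl = a2_bam_hdl;
+ *	in.client_ep_idx = 1;
+ *	in.desc_fifo_sz = 0x100;
+ *	in.data_fifo_sz = 0x800;
+ *	in.pipe_mem_preferred = true;
+ *	if (ipa_connect(&in, &out, &hdl))
+ *		return -ENODEV;
+ *
+ * out.ipa_bam_hdl and out.ipa_ep_idx then feed the client's own SPS
+ * connection setup, as done in a2_service.c.
+ */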
+
+/**
+ * struct ipa_tx_intf - interface tx properties
+ * @num_props:	number of tx properties
+ * @prop:	the tx properties array
+ */
+struct ipa_tx_intf {
+	u32 num_props;
+	struct ipa_ioc_tx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_rx_intf - interface rx properties
+ * @num_props:	number of rx properties
+ * @prop:	the rx properties array
+ */
+struct ipa_rx_intf {
+	u32 num_props;
+	struct ipa_ioc_rx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_sys_connect_params - information needed to setup an IPA end-point
+ * in system-BAM mode
+ * @ipa_ep_cfg:	IPA EP configuration
+ * @client:	the type of client who "owns" the EP
+ * @desc_fifo_sz:	size of desc FIFO
+ * @priv:	callback cookie
+ * @notify:	callback
+ *		priv - callback cookie
+ *		evt - type of event
+ *		data - data relevant to event.  May not be valid. See event_type
+ *		enum for valid cases.
+ */
+struct ipa_sys_connect_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	u32 desc_fifo_sz;
+	void *priv;
+	void (*notify)(void *priv,
+			enum ipa_dp_evt_type evt,
+			unsigned long data);
+};
+
+/**
+ * struct ipa_msg_meta_wrapper - message meta-data wrapper
+ * @meta:	the meta-data itself
+ * @link:	opaque to client
+ * @meta_wrapper_free:	function to free the metadata wrapper when IPA driver
+ *			is done with it
+ */
+struct ipa_msg_meta_wrapper {
+	struct ipa_msg_meta meta;
+	struct list_head link;
+	void (*meta_wrapper_free)(struct ipa_msg_meta_wrapper *buff);
+};
+
+/**
+ * struct ipa_tx_meta - meta-data for the TX packet
+ * @mbim_stream_id:	the stream ID used in NDP signature
+ * @mbim_stream_id_valid:	 is above field valid?
+ */
+struct ipa_tx_meta {
+	u8 mbim_stream_id;
+	bool mbim_stream_id_valid;
+};
+
+/**
+ * struct ipa_msg_wrapper - message wrapper
+ * @msg:	the message buffer itself, MUST exist after call returns, will
+ *		be freed by IPA driver when it is done with it
+ * @link:	opaque to client
+ * @msg_free:	function to free the message when IPA driver is done with it
+ * @msg_wrapper_free:	function to free the message wrapper when IPA driver is
+ *			done with it
+ */
+struct ipa_msg_wrapper {
+	void *msg;
+	struct list_head link;
+	void (*msg_free)(void *msg);
+	void (*msg_wrapper_free)(struct ipa_msg_wrapper *buff);
+};
+
+/**
+ * typedef ipa_pull_fn - callback function
+ * @buf - [in] the buffer to populate the message into
+ * @sz - [in] the size of the buffer
+ *
+ * callback function registered by kernel client with IPA driver for IPA driver
+ * to be able to pull messages from the kernel client asynchronously.
+ *
+ * Returns how many bytes were copied into the buffer, negative on failure.
+ */
+typedef int (*ipa_pull_fn)(void *buf, uint16_t sz);
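+
+/*
+ * Illustrative sketch only (my_msg and my_msg_len are placeholders owned by
+ * the client): a pull callback copies a pending message into the supplied
+ * buffer and returns the number of bytes copied:
+ *
+ *	static int my_pull_cb(void *buf, uint16_t sz)
+ *	{
+ *		if (my_msg_len > sz)
+ *			return -EAGAIN;
+ *		memcpy(buf, my_msg, my_msg_len);
+ *		return my_msg_len;
+ *	}
+ */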
+
+/*
+ * Connect / Disconnect
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+		u32 *clnt_hdl);
+int ipa_disconnect(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+/*
+ * Header removal / addition
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa_commit_hdr(void);
+
+int ipa_reset_hdr(void);
+
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa_put_hdr(u32 hdr_hdl);
+
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Routing
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa_commit_rt(enum ipa_ip_type ip);
+
+int ipa_reset_rt(enum ipa_ip_type ip);
+
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa_put_rt_tbl(u32 rt_tbl_hdl);
+
+/*
+ * Filtering
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa_commit_flt(enum ipa_ip_type ip);
+
+int ipa_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Aggregation
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * rmnet bridge
+ */
+int rmnet_bridge_init(void);
+
+int rmnet_bridge_disconnect(void);
+
+int rmnet_bridge_connect(u32 producer_hdl,
+			 u32 consumer_hdl,
+			 int wwan_logical_channel_id);
+
+/*
+ * Data path
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+/*
+ * System pipes
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa_teardown_sys_pipe(u32 clnt_hdl);
+
+#endif /* _IPA_H_ */
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 34e1d40..75cc086 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -76,4 +76,18 @@
 	  PNP PMIC. It configures the frequency of clkdiv outputs on the
 	  PMIC. These clocks are typically wired through alternate functions
 	  on gpio pins.
+
+config IPA
+	tristate "IPA support"
+	depends on SPS
+	help
+	  This driver supports the Internet Packet Accelerator (IPA) core.
+	  IPA is a programmable protocol processor HW block.
+	  It is designed to support generic HW processing of UL/DL IP packets
+	  for various use cases independent of radio technology.
+	  The driver supports client connection and configuration
+	  for the IPA core.
+	  Kernel and user-space processes can call the IPA driver
+	  to configure the IPA core.
+
 endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 35efd91..0a755d3 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -3,6 +3,7 @@
 #
 obj-$(CONFIG_MSM_SSBI) += ssbi.o
 obj-$(CONFIG_USB_BAM) += usb_bam.o
+obj-$(CONFIG_IPA) += ipa/
 obj-$(CONFIG_SPS) += sps/
 obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
 obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
new file mode 100644
index 0000000..ded5b50
--- /dev/null
+++ b/drivers/platform/msm/ipa/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+	ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
new file mode 100644
index 0000000..0ae2552
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -0,0 +1,276 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_i.h"
+
+static struct a2_service_cb_type {
+	void *tx_complete_cb;
+	void *rx_cb;
+	u32 producer_handle;
+	u32 consumer_handle;
+} a2_service_cb;
+
+static struct sps_mem_buffer data_mem_buf[2];
+static struct sps_mem_buffer desc_mem_buf[2];
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+			u8 *usb_pipe_idx,
+			u32 *clnt_hdl,
+			struct sps_pipe *pipe);
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+		struct ipa_sps_params *out_params, u32 *clnt_hdl);
+
+/**
+ * a2_mux_initialize() - initialize A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ */
+int a2_mux_initialize(void)
+{
+	(void) msm_bam_dmux_ul_power_vote();
+
+	return 0;
+}
+
+/**
+ * a2_mux_close() - close A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int a2_mux_close(void)
+{
+	int ret = 0;
+
+	(void) msm_bam_dmux_ul_power_unvote();
+
+	ret = ipa_disconnect(a2_service_cb.consumer_handle);
+	if (0 != ret) {
+		pr_err("%s: ipa_disconnect failure\n", __func__);
+		goto bail;
+	}
+
+	ret = ipa_disconnect(a2_service_cb.producer_handle);
+	if (0 != ret) {
+		pr_err("%s: ipa_disconnect failure\n", __func__);
+		goto bail;
+	}
+
+	ret = 0;
+
+bail:
+
+	return ret;
+}
+
+/**
+ * a2_mux_open_port() - open connection to A2
+ * @wwan_logical_channel_id:	 WWAN logical channel ID
+ * @rx_cb:	Rx callback
+ * @tx_complete_cb:	Tx completed callback
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+		void *tx_complete_cb)
+{
+	int ret = 0;
+	u8 src_pipe = 0;
+	u8 dst_pipe = 0;
+	struct sps_pipe *a2_to_ipa_pipe = NULL;
+	struct sps_pipe *ipa_to_a2_pipe = NULL;
+
+	(void) wwan_logical_channel_id;
+
+	a2_service_cb.rx_cb = rx_cb;
+	a2_service_cb.tx_complete_cb = tx_complete_cb;
+
+	ret = connect_pipe_ipa(A2_TO_IPA,
+			&src_pipe,
+			&(a2_service_cb.consumer_handle),
+			a2_to_ipa_pipe);
+	if (ret) {
+		pr_err("%s: A2 to IPA pipe connection failure\n", __func__);
+		goto bail;
+	}
+
+	ret = connect_pipe_ipa(IPA_TO_A2,
+			&dst_pipe,
+			&(a2_service_cb.producer_handle),
+			ipa_to_a2_pipe);
+	if (ret) {
+		pr_err("%s: IPA to A2 pipe connection failure\n", __func__);
+		sps_disconnect(a2_to_ipa_pipe);
+		sps_free_endpoint(a2_to_ipa_pipe);
+		(void) ipa_disconnect(a2_service_cb.consumer_handle);
+		goto bail;
+	}
+
+	ret = 0;
+
+bail:
+
+	return ret;
+}
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+			u8 *usb_pipe_idx,
+			u32 *clnt_hdl,
+			struct sps_pipe *pipe)
+{
+	int ret;
+	struct sps_connect connection = {0, };
+	u32 a2_handle = 0;
+	u32 a2_phy_addr = 0;
+	struct a2_mux_pipe_connection pipe_connection = { 0, };
+	struct ipa_connect_params ipa_in_params;
+	struct ipa_sps_params sps_out_params;
+
+	memset(&ipa_in_params, 0, sizeof(ipa_in_params));
+	memset(&sps_out_params, 0, sizeof(sps_out_params));
+
+	if (!usb_pipe_idx || !clnt_hdl) {
+		pr_err("connect_pipe_ipa :: null arguments\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_connection);
+	if (ret) {
+		pr_err("ipa_get_a2_mux_pipe_info failed\n");
+		goto bail;
+	}
+
+	if (pipe_dir == A2_TO_IPA) {
+		a2_phy_addr = pipe_connection.src_phy_addr;
+		ipa_in_params.client = IPA_CLIENT_A2_TETHERED_PROD;
+		ipa_in_params.ipa_ep_cfg.mode.mode = IPA_DMA;
+		ipa_in_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
+		pr_debug("%s: pipe_connection->src_pipe_index = %d\n",
+				__func__, pipe_connection.src_pipe_index);
+		ipa_in_params.client_ep_idx = pipe_connection.src_pipe_index;
+	} else {
+		a2_phy_addr = pipe_connection.dst_phy_addr;
+		ipa_in_params.client = IPA_CLIENT_A2_TETHERED_CONS;
+		ipa_in_params.client_ep_idx = pipe_connection.dst_pipe_index;
+	}
+
+	ret = sps_phy2h(a2_phy_addr, &a2_handle);
+	if (ret) {
+		pr_err("%s: sps_phy2h failed (A2 BAM) %d\n", __func__, ret);
+		goto bail;
+	}
+
+	ipa_in_params.client_bam_hdl = a2_handle;
+	ipa_in_params.desc_fifo_sz = pipe_connection.desc_fifo_size;
+	ipa_in_params.data_fifo_sz = pipe_connection.data_fifo_size;
+
+	if (pipe_connection.mem_type == IPA_SPS_PIPE_MEM) {
+		pr_debug("%s: A2 BAM using SPS pipe memory\n", __func__);
+		ret = sps_setup_bam2bam_fifo(&data_mem_buf[pipe_dir],
+				pipe_connection.data_fifo_base_offset,
+				pipe_connection.data_fifo_size, 1);
+		if (ret) {
+			pr_err("%s: data fifo setup failure %d\n",
+					__func__, ret);
+			goto bail;
+		}
+
+		ret = sps_setup_bam2bam_fifo(&desc_mem_buf[pipe_dir],
+				pipe_connection.desc_fifo_base_offset,
+				pipe_connection.desc_fifo_size, 1);
+		if (ret) {
+			pr_err("%s: desc. fifo setup failure %d\n",
+					__func__, ret);
+			goto bail;
+		}
+
+		ipa_in_params.data = data_mem_buf[pipe_dir];
+		ipa_in_params.desc = desc_mem_buf[pipe_dir];
+	}
+
+	ret = a2_ipa_connect_pipe(&ipa_in_params,
+			&sps_out_params,
+			clnt_hdl);
+	if (ret) {
+		pr_err("%s: a2_ipa_connect_pipe failed\n", __func__);
+		goto bail;
+	}
+
+	pipe = sps_alloc_endpoint();
+	if (pipe == NULL) {
+		pr_err("%s: sps_alloc_endpoint failed\n", __func__);
+		ret = -ENOMEM;
+		goto a2_ipa_connect_pipe_failed;
+	}
+
+	ret = sps_get_config(pipe, &connection);
+	if (ret) {
+		pr_err("%s: tx get config failed %d\n", __func__, ret);
+		goto get_config_failed;
+	}
+
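+	/*
+	 * For A2->IPA the A2 BAM is the SPS source and the IPA BAM is the
+	 * destination; for IPA->A2 the roles are reversed.
+	 */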
+	if (pipe_dir == A2_TO_IPA) {
+		connection.mode = SPS_MODE_SRC;
+		*usb_pipe_idx = connection.src_pipe_index;
+		connection.source = a2_handle;
+		connection.destination = sps_out_params.ipa_bam_hdl;
+		connection.src_pipe_index = pipe_connection.src_pipe_index;
+		connection.dest_pipe_index = sps_out_params.ipa_ep_idx;
+	} else {
+		connection.mode = SPS_MODE_DEST;
+		*usb_pipe_idx = connection.dest_pipe_index;
+		connection.source = sps_out_params.ipa_bam_hdl;
+		connection.destination = a2_handle;
+		connection.src_pipe_index = sps_out_params.ipa_ep_idx;
+		connection.dest_pipe_index = pipe_connection.dst_pipe_index;
+	}
+
+	connection.event_thresh = 16;
+	connection.data = sps_out_params.data;
+	connection.desc = sps_out_params.desc;
+
+	ret = sps_connect(pipe, &connection);
+	if (ret < 0) {
+		pr_err("%s: tx connect error %d\n", __func__, ret);
+		goto error;
+	}
+
+	ret = 0;
+	goto bail;
+error:
+	sps_disconnect(pipe);
+get_config_failed:
+	sps_free_endpoint(pipe);
+a2_ipa_connect_pipe_failed:
+	(void) ipa_disconnect(*clnt_hdl);
+bail:
+	return ret;
+}
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+		struct ipa_sps_params *out_params, u32 *clnt_hdl)
+{
+	return ipa_connect(in_params, out_params, clnt_hdl);
+}
+
diff --git a/drivers/platform/msm/ipa/a2_service.h b/drivers/platform/msm/ipa/a2_service.h
new file mode 100644
index 0000000..80885da
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _A2_SERVICE_H_
+#define _A2_SERVICE_H_
+
+int a2_mux_initialize(void);
+
+int a2_mux_close(void);
+
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+		void *tx_complete_cb);
+
+#endif /* _A2_SERVICE_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
new file mode 100644
index 0000000..8f68ef5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -0,0 +1,1790 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_READ_MAX (16)
+#define IPA_MOBILE_AP_MODE(x) ((x) == IPA_MODE_MOBILE_AP_ETH || \
+			       (x) == IPA_MODE_MOBILE_AP_WAN || \
+			       (x) == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
+#define IPA_DMA_POOL_SIZE (512)
+#define IPA_DMA_POOL_ALIGNMENT (4)
+#define IPA_DMA_POOL_BOUNDARY (1024)
+#define WLAN_AMPDU_TX_EP (15)
+#define IPA_ROUTING_RULE_BYTE_SIZE (4)
+#define IPA_BAM_CNFG_BITS_VAL (0x7FFFE004)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+struct ipa_plat_drv_res {
+	u32 ipa_mem_base;
+	u32 ipa_mem_size;
+	u32 bam_mem_base;
+	u32 bam_mem_size;
+	u32 ipa_irq;
+	u32 bam_irq;
+	u32 ipa_pipe_mem_start_ofst;
+	u32 ipa_pipe_mem_size;
+	struct a2_mux_pipe_connection a2_to_ipa_pipe;
+	struct a2_mux_pipe_connection ipa_to_a2_pipe;
+};
+
+static struct ipa_plat_drv_res ipa_res = {0, };
+static struct of_device_id ipa_plat_drv_match[] = {
+	{
+		.compatible = "qcom,ipa",
+	},
+
+	{
+	}
+};
+
+static struct clk *ipa_clk_src;
+static struct clk *ipa_clk;
+static struct clk *sys_noc_ipa_axi_clk;
+static struct clk *ipa_cnoc_clk;
+static struct device *ipa_dev;
+
+struct ipa_context *ipa_ctx;
+
+static bool polling_mode;
+module_param(polling_mode, bool, 0644);
+MODULE_PARM_DESC(polling_mode,
+		"1 - pure polling mode; 0 - interrupt+polling mode");
+static uint polling_delay_ms = 50;
+module_param(polling_delay_ms, uint, 0644);
+MODULE_PARM_DESC(polling_delay_ms, "set to desired delay between polls");
+static bool hdr_tbl_lcl = 1;
+module_param(hdr_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(hdr_tbl_lcl, "where hdr tbl resides 1-local; 0-system");
+static bool ip4_rt_tbl_lcl = 1;
+module_param(ip4_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_rt_tbl_lcl,
+		"where ip4 rt tables reside 1-local; 0-system");
+static bool ip6_rt_tbl_lcl = 1;
+module_param(ip6_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_rt_tbl_lcl,
+		"where ip6 rt tables reside 1-local; 0-system");
+static bool ip4_flt_tbl_lcl = 1;
+module_param(ip4_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_flt_tbl_lcl,
+		"where ip4 flt tables reside 1-local; 0-system");
+static bool ip6_flt_tbl_lcl = 1;
+module_param(ip6_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_flt_tbl_lcl,
+		"where ip6 flt tables reside 1-local; 0-system");
+
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+				    enum a2_mux_pipe_direction pipe_dir,
+				    struct a2_mux_pipe_connection     *pdata);
+
+static int ipa_update_connections_info(struct device_node *node,
+			struct a2_mux_pipe_connection *pipe_connection);
+
+static void ipa_set_aggregation_params(void);
+
+static ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	u32 reg_val = 0xfeedface;
+	char str[IPA_READ_MAX];
+	int result;
+	static int read_cnt;
+
+	if (read_cnt) {
+		IPAERR("only supports one call to read\n");
+		return 0;
+	}
+
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST);
+	result = scnprintf(str, IPA_READ_MAX, "%x\n", reg_val);
+	if (copy_to_user(buf, str, result))
+		return -EFAULT;
+	read_cnt = 1;
+
+	return result;
+}
+
+static int ipa_open(struct inode *inode, struct file *filp)
+{
+	struct ipa_context *ctx = NULL;
+
+	IPADBG("ENTER\n");
+	ctx = container_of(inode->i_cdev, struct ipa_context, cdev);
+	filp->private_data = ctx;
+
+	return 0;
+}
+
+static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	u8 header[128] = { 0 };
+	u8 *param = NULL;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+	struct ipa_ioc_v4_nat_init nat_init;
+	struct ipa_ioc_v4_nat_del nat_del;
+
+	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+		return -ENOTTY;
+	if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case IPA_IOC_ALLOC_NAT_MEM:
+		if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_V4_INIT_NAT:
+		if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_init))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_nat_init_cmd(&nat_init)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_NAT_DMA:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_dma_cmd))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		pyld_sz =
+		   sizeof(struct ipa_ioc_nat_dma_cmd) +
+		   ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
+		   sizeof(struct ipa_ioc_nat_dma_one);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_V4_DEL_NAT:
+		if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_del))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_nat_del_cmd(&nat_del)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr) +
+		   ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
+		   sizeof(struct ipa_hdr_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr) +
+		   ((struct ipa_ioc_del_hdr *)header)->num_hdls *
+		   sizeof(struct ipa_hdr_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule) +
+		   ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
+		   sizeof(struct ipa_rt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_rt_rule) +
+		   ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
+		   sizeof(struct ipa_rt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule) +
+		   ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
+		   sizeof(struct ipa_flt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_flt_rule) +
+		   ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
+		   sizeof(struct ipa_flt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_COMMIT_HDR:
+		retval = ipa_commit_hdr();
+		break;
+	case IPA_IOC_RESET_HDR:
+		retval = ipa_reset_hdr();
+		break;
+	case IPA_IOC_COMMIT_RT:
+		retval = ipa_commit_rt(arg);
+		break;
+	case IPA_IOC_RESET_RT:
+		retval = ipa_reset_rt(arg);
+		break;
+	case IPA_IOC_COMMIT_FLT:
+		retval = ipa_commit_flt(arg);
+		break;
+	case IPA_IOC_RESET_FLT:
+		retval = ipa_reset_flt(arg);
+		break;
+	case IPA_IOC_DUMP:
+		ipa_dump();
+		break;
+	case IPA_IOC_GET_RT_TBL:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_RT_TBL:
+		retval = ipa_put_rt_tbl(arg);
+		break;
+	case IPA_IOC_GET_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_HDR:
+		retval = ipa_put_hdr(arg);
+		break;
+	case IPA_IOC_SET_FLT:
+		retval = ipa_cfg_filter(arg);
+		break;
+	case IPA_IOC_COPY_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	default:        /* redundant, as cmd was checked against MAXNR */
+		return -ENOTTY;
+	}
+	kfree(param);
+
+	return retval;
+}
+
+/**
+* ipa_setup_dflt_rt_tables() - Setup default routing tables
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+int ipa_setup_dflt_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+
+	rt_rule =
+	   kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule) {
+		IPAERR("fail to alloc mem\n");
+		return -ENOMEM;
+	}
+	/* setup a default v4 route to point to A5 */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl;
+
+	if (ipa_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v4 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to A5 */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v6 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/*
+	 * because these tables are the very first to be added, they will both
+	 * have the same index (0) which is essential for programming the
+	 * "route" end-point config
+	 */
+
+	kfree(rt_rule);
+
+	return 0;
+}
+
+static int ipa_setup_exception_path(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	struct ipa_route route = { 0 };
+	int ret;
+
+	/* install the basic exception header */
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr) {
+		IPAERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+	strlcpy(hdr_entry->name, IPA_DFLT_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+
+	/*
+	 * only single stream for MBIM supported and no exception packets
+	 * expected so set default header to zero
+	 */
+	hdr_entry->hdr_len = 1;
+	hdr_entry->hdr[0] = 0;
+
+	/*
+	 * SW does not know anything about default exception header so
+	 * we don't set it. IPA HW will use it as a template
+	 */
+	if (ipa_add_hdr(hdr)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+	/* exception packets go to the LAN-WAN pipe from IPA to A5 */
+	route.route_def_pipe = IPA_A5_LAN_WAN_IN;
+	route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl;
+
+	if (ipa_cfg_route(&route)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+static void ipa_handle_tx_poll_for_pipe(struct ipa_sys_context *sys)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt, *t;
+	struct sps_iovec iov;
+	unsigned long irq_flags;
+	int ret;
+
+	while (1) {
+		iov.addr = 0;
+		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+		if (ret) {
+			pr_err("%s: sps_get_iovec failed %d\n", __func__, ret);
+			break;
+		}
+		if (!iov.addr)
+			break;
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		tx_pkt = list_first_entry(&sys->head_desc_list,
+					  struct ipa_tx_pkt_wrapper, link);
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		switch (tx_pkt->cnt) {
+		case 1:
+			ipa_write_done(&tx_pkt->work);
+			break;
+		case 0xFFFF:
+			/* reached end of set */
+			spin_lock_irqsave(&sys->spinlock, irq_flags);
+			list_for_each_entry_safe(tx_pkt, t,
+						 &sys->wait_desc_list, link) {
+				list_del(&tx_pkt->link);
+				list_add(&tx_pkt->link, &sys->head_desc_list);
+			}
+			tx_pkt =
+			   list_first_entry(&sys->head_desc_list,
+					    struct ipa_tx_pkt_wrapper, link);
+			spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+			ipa_write_done(&tx_pkt->work);
+			break;
+		default:
+			/* keep looping till reach the end of the set */
+			spin_lock_irqsave(&sys->spinlock,
+					  irq_flags);
+			list_del(&tx_pkt->link);
+			list_add_tail(&tx_pkt->link,
+				      &sys->wait_desc_list);
+			spin_unlock_irqrestore(&sys->spinlock,
+					       irq_flags);
+			break;
+		}
+	}
+}
+
+static void ipa_poll_function(struct work_struct *work)
+{
+	int ret;
+	int tx_pipes[] = { IPA_A5_CMD, IPA_A5_LAN_WAN_OUT,
+		IPA_A5_WLAN_AMPDU_OUT };
+	int i;
+	int num_tx_pipes;
+
+	/* check all the system pipes for tx completions and rx available */
+	if (ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep->valid)
+		ipa_handle_rx_core();
+
+	num_tx_pipes = sizeof(tx_pipes) / sizeof(tx_pipes[0]);
+
+	if (!IPA_MOBILE_AP_MODE(ipa_ctx->mode))
+		num_tx_pipes--;
+
+	for (i = 0; i < num_tx_pipes; i++)
+		if (ipa_ctx->sys[tx_pipes[i]].ep->valid)
+			ipa_handle_tx_poll_for_pipe(&ipa_ctx->sys[tx_pipes[i]]);
+
+	/* re-post the poll work */
+	INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
+	ret = schedule_delayed_work_on(smp_processor_id(), &ipa_ctx->poll_work,
+			msecs_to_jiffies(polling_delay_ms));
+
+	return;
+}
+
+static int ipa_setup_a5_pipes(void)
+{
+	struct ipa_sys_connect_params sys_in;
+	int result = 0;
+
+	/* CMD OUT (A5->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_A5_CMD_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail;
+	}
+
+	if (ipa_setup_exception_path()) {
+		IPAERR(":fail to setup excp path\n");
+		result = -EPERM;
+		goto fail_cmd;
+	}
+
+	/* LAN-WAN IN (IPA->A5) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_A5_LAN_WAN_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1;
+	sys_in.ipa_ep_cfg.hdr.hdr_len = 8;  /* size of A5 exception hdr */
+	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_cmd;
+	}
+	/* LAN-WAN OUT (A5->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_data_out;
+	}
+	if (ipa_ctx->polling_mode) {
+		INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
+		result =
+		   schedule_delayed_work_on(smp_processor_id(),
+					&ipa_ctx->poll_work,
+					msecs_to_jiffies(polling_delay_ms));
+		if (!result) {
+			IPAERR(":schedule delayed work failed.\n");
+			goto fail_schedule_delayed_work;
+		}
+	}
+
+	return 0;
+
+fail_schedule_delayed_work:
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+fail_data_out:
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+fail_cmd:
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+fail:
+	return result;
+}
+
+static void ipa_teardown_a5_pipes(void)
+{
+	cancel_delayed_work(&ipa_ctx->poll_work);
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+}
+
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+				    enum a2_mux_pipe_direction  pipe_dir,
+				    struct a2_mux_pipe_connection *pdata)
+{
+	struct device_node *node;
+	int rc = 0;
+
+	if (!pdata || !pdev)
+		return -EINVAL;
+
+	/* retrieve device tree parameters */
+	for_each_child_of_node(pdev->dev.of_node, node) {
+		const char *str;
+
+		rc = of_property_read_string(node, "label", &str);
+		if (rc) {
+			IPAERR("Cannot read string\n");
+			goto err;
+		}
+
+		/* Check if connection type is supported */
+		if (strncmp(str, "a2-to-ipa", 10)
+			&& strncmp(str, "ipa-to-a2", 10)) {
+			rc = -EINVAL;
+			goto err;
+		}
+
+		if (strnstr(str, "a2-to-ipa", strnlen("a2-to-ipa", 10))
+				&& IPA_TO_A2 == pipe_dir)
+			continue; /* skip to the next pipe */
+		else if (strnstr(str, "ipa-to-a2", strnlen("ipa-to-a2", 10))
+				&& A2_TO_IPA == pipe_dir)
+			continue; /* skip to the next pipe */
+
+		rc = ipa_update_connections_info(node, pdata);
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+err:
+	IPAERR("%s: failed\n", __func__);
+
+	return rc;
+}
+
+static int ipa_update_connections_info(struct device_node *node,
+		struct a2_mux_pipe_connection     *pipe_connection)
+{
+	int      rc;
+	char     *key;
+	uint32_t val;
+
+	if (!pipe_connection || !node)
+		return -EINVAL;
+
+	key = "qcom,src-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_phy_addr = val;
+
+	key = "qcom,ipa-bam-mem-type";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->mem_type = val;
+
+	key = "qcom,src-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_pipe_index = val;
+
+	key = "qcom,dst-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_phy_addr = val;
+
+	key = "qcom,dst-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_pipe_index = val;
+
+	key = "qcom,data-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_base_offset = val;
+
+	key = "qcom,data-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_size = val;
+
+	key = "qcom,descriptor-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->desc_fifo_base_offset = val;
+
+	key = "qcom,descriptor-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+
+	pipe_connection->desc_fifo_size = val;
+
+	return 0;
+err:
+	IPAERR("%s: Error in name %s key %s\n", __func__, node->full_name, key);
+
+	return rc;
+}
+
+/**
+* ipa_get_a2_mux_pipe_info() - Exposes A2 parameters fetched from DTS
+*
+* @pipe_dir: pipe direction
+* @pipe_connect: connect structure containing the parameters fetched from DTS
+*
+* Return codes:
+* 0: success
+* -EFAULT: invalid parameters
+*/
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction  pipe_dir,
+			     struct a2_mux_pipe_connection *pipe_connect)
+{
+	if (!pipe_connect) {
+		IPAERR("ipa_get_a2_mux_pipe_info: null args\n");
+		return -EFAULT;
+	}
+
+	switch (pipe_dir) {
+	case A2_TO_IPA:
+		*pipe_connect = ipa_res.a2_to_ipa_pipe;
+		break;
+	case IPA_TO_A2:
+		*pipe_connect = ipa_res.ipa_to_a2_pipe;
+		break;
+	default:
+		IPAERR("ipa_get_a2_mux_pipe_info switch in default\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void ipa_set_aggregation_params(void)
+{
+	struct ipa_ep_cfg_aggr agg_params;
+	u32 producer_hdl = 0;
+	u32 consumer_hdl = 0;
+
+	rmnet_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
+
+	agg_params.aggr = ipa_ctx->aggregation_type;
+	agg_params.aggr_byte_limit = ipa_ctx->aggregation_byte_limit;
+	agg_params.aggr_time_limit = ipa_ctx->aggregation_time_limit;
+
+	/* configure aggregation on producer */
+	agg_params.aggr_en = IPA_ENABLE_AGGR;
+	ipa_cfg_ep_aggr(producer_hdl, &agg_params);
+
+	/* configure deaggregation on consumer */
+	agg_params.aggr_en = IPA_ENABLE_DEAGGR;
+	ipa_cfg_ep_aggr(consumer_hdl, &agg_params);
+
+}
+
+/*
+ * The following device attributes are for configuring the aggregation
+ * attributes when the driver is already running.
+ * The attributes are for configuring the aggregation type
+ * (MBIM_16/MBIM_32/TLP), the aggregation byte limit and the aggregation
+ * time limit.
+ */
+static ssize_t ipa_show_aggregation_type(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	ssize_t ret_val;
+	char str[IPA_AGGR_MAX_STR_LENGTH];
+
+	if (!buf) {
+		IPAERR("buffer for ipa_show_aggregation_type is NULL\n");
+		return -EINVAL;
+	}
+
+	memset(str, 0, sizeof(str));
+
+	switch (ipa_ctx->aggregation_type) {
+	case IPA_MBIM_16:
+		strlcpy(str, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16"));
+		break;
+	case IPA_MBIM_32:
+		strlcpy(str, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32"));
+		break;
+	case IPA_TLP:
+		strlcpy(str, "TLP", IPA_AGGR_STR_IN_BYTES("TLP"));
+		break;
+	default:
+		strlcpy(str, "NONE", IPA_AGGR_STR_IN_BYTES("NONE"));
+		break;
+	}
+
+	ret_val = scnprintf(buf, PAGE_SIZE, "%s\n", str);
+
+	return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_type(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_store_aggregation_type is NULL\n");
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+	pstr = strim(str);
+
+	if (!strncmp(pstr, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16")))
+		ipa_ctx->aggregation_type = IPA_MBIM_16;
+	else if (!strncmp(pstr, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32")))
+		ipa_ctx->aggregation_type = IPA_MBIM_32;
+	else if (!strncmp(pstr, "TLP", IPA_AGGR_STR_IN_BYTES("TLP")))
+		ipa_ctx->aggregation_type = IPA_TLP;
+	else {
+		IPAERR("ipa_store_aggregation_type wrong input\n");
+		return -EINVAL;
+	}
+
+	ipa_set_aggregation_params();
+
+	return count;
+}
+
+static DEVICE_ATTR(aggregation_type, S_IWUSR | S_IRUSR,
+		ipa_show_aggregation_type,
+		ipa_store_aggregation_type);
+
+static ssize_t ipa_show_aggregation_byte_limit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	ssize_t ret_val;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_show_aggregation_byte_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	ret_val = scnprintf(buf, PAGE_SIZE, "%u\n",
+			    ipa_ctx->aggregation_byte_limit);
+
+	return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_byte_limit(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	char str[IPA_AGGR_MAX_STR_LENGTH];
+	char *pstr;
+	u32 ret = 0;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_store_aggregation_byte_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+	pstr = strim(str);
+
+	if (kstrtouint(pstr, 10, &ret)) {
+		IPAERR("ipa_store_aggregation_byte_limit wrong input\n");
+		return -EINVAL;
+	}
+
+	ipa_ctx->aggregation_byte_limit = ret;
+
+	ipa_set_aggregation_params();
+
+	return count;
+}
+
+static DEVICE_ATTR(aggregation_byte_limit, S_IWUSR | S_IRUSR,
+		ipa_show_aggregation_byte_limit,
+		ipa_store_aggregation_byte_limit);
+
+static ssize_t ipa_show_aggregation_time_limit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	ssize_t ret_val;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_show_aggregation_time_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	ret_val = scnprintf(buf,
+			    PAGE_SIZE,
+			    "%u\n",
+			    ipa_ctx->aggregation_time_limit);
+
+	return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_time_limit(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+	u32 ret = 0;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_store_aggregation_time_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+	pstr = strim(str);
+
+	if (kstrtouint(pstr, 10, &ret)) {
+		IPAERR("ipa_store_aggregation_time_limit wrong input\n");
+		return -EINVAL;
+	}
+
+	ipa_ctx->aggregation_time_limit = ret;
+
+	ipa_set_aggregation_params();
+
+	return count;
+}
+
+static DEVICE_ATTR(aggregation_time_limit, S_IWUSR | S_IRUSR,
+		ipa_show_aggregation_time_limit,
+		ipa_store_aggregation_time_limit);
+
+static const struct file_operations ipa_drv_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa_open,
+	.read = ipa_read,
+	.unlocked_ioctl = ipa_ioctl,
+};
+
+static int ipa_get_clks(struct device *dev)
+{
+	ipa_cnoc_clk = clk_get(dev, "iface_clk");
+	if (IS_ERR(ipa_cnoc_clk)) {
+		ipa_cnoc_clk = NULL;
+		IPAERR("fail to get cnoc clk\n");
+		return -ENODEV;
+	}
+
+	ipa_clk_src = clk_get(dev, "core_src_clk");
+	if (IS_ERR(ipa_clk_src)) {
+		ipa_clk_src = NULL;
+		IPAERR("fail to get ipa clk src\n");
+		return -ENODEV;
+	}
+
+	ipa_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(ipa_clk)) {
+		ipa_clk = NULL;
+		IPAERR("fail to get ipa clk\n");
+		return -ENODEV;
+	}
+
+	sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk");
+	if (IS_ERR(sys_noc_ipa_axi_clk)) {
+		sys_noc_ipa_axi_clk = NULL;
+		IPAERR("fail to get sys_noc_ipa_axi clk\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa_enable_clks() - Turn on IPA clocks
+ *
+ * Return codes:
+ * None
+ */
+void ipa_enable_clks(void)
+{
+	if (ipa_cnoc_clk) {
+		clk_prepare(ipa_cnoc_clk);
+		clk_enable(ipa_cnoc_clk);
+		clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE);
+	} else {
+		WARN_ON(1);
+	}
+
+	if (ipa_clk_src)
+		clk_set_rate(ipa_clk_src, IPA_V1_CLK_RATE);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_prepare(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (sys_noc_ipa_axi_clk)
+		clk_prepare(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_enable(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (sys_noc_ipa_axi_clk)
+		clk_enable(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+}
+
+/**
+ * ipa_disable_clks() - Turn off IPA clocks
+ *
+ * Return codes:
+ * None
+ */
+void ipa_disable_clks(void)
+{
+	if (sys_noc_ipa_axi_clk)
+		clk_disable_unprepare(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_disable_unprepare(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_cnoc_clk)
+		clk_disable_unprepare(ipa_cnoc_clk);
+	else
+		WARN_ON(1);
+}
+
+static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
+{
+	void *bam_cnfg_bits;
+
+	bam_cnfg_bits = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST,
+				IPA_BAM_REMAP_SIZE);
+	if (!bam_cnfg_bits)
+		return -ENOMEM;
+	ipa_write_reg(bam_cnfg_bits, IPA_BAM_CNFG_BITS_OFST,
+		      IPA_BAM_CNFG_BITS_VAL);
+	iounmap(bam_cnfg_bits);
+
+	return 0;
+}
+
+/**
+ * ipa_init() - Initialize the IPA driver
+ * @resource_p:	contains platform-specific values parsed from the DTS file
+ *
+ * Initialization sequence:
+ * - Allocate memory for the driver context data struct
+ * - Initialize ipa_ctx with:
+ *   1) values parsed from the DTS file
+ *   2) parameters passed at module initialization
+ *   3) values read from HW (such as core memory size)
+ * - Map the IPA core registers into CPU memory
+ * - Restart the IPA core (HW reset)
+ * - Register the IPA BAM with the SPS driver and obtain a BAM handle
+ * - Configure the IPA BAM via BAM_CNFG_BITS
+ * - Initialize the look-aside caches (kmem_cache/slab) for filter,
+ *   routing and IPA-tree entries
+ * - Create a DMA pool of 512-byte, 4-byte-aligned objects that do not cross
+ *   a 1 KB boundary; these objects are used for TX (A5->IPA)
+ * - Initialize the list heads (routing, filter, hdr, system pipes)
+ * - Initialize the mutexes (for ipa_ctx and NAT memory)
+ * - Initialize the spinlocks (for the lists related to A5<->IPA pipes)
+ * - Initialize two single-threaded workqueues, "ipa rx wq" and "ipa tx wq"
+ * - Initialize the red-black trees holding the handles of headers, routing
+ *   rules, routing tables and filtering rules
+ * - Set up all A5<->IPA pipes by calling ipa_setup_a5_pipes
+ * - Prepare the descriptors for the system pipes
+ * - Initialize the filter block by committing the IPv4 and IPv6 default rules
+ * - Create an empty routing table in system memory (without committing it)
+ * - Initialize the pipe memory pool via ipa_pipe_mem_init on supported
+ *   platforms
+ * - Create a char device for IPA
+ */
+static int ipa_init(const struct ipa_plat_drv_res *resource_p)
+{
+	int result = 0;
+	int i;
+	struct sps_bam_props bam_props = { 0 };
+	struct ipa_flt_tbl *flt_tbl;
+	struct ipa_rt_tbl_set *rset;
+
+	IPADBG("IPA init\n");
+
+	ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
+	if (!ipa_ctx) {
+		IPAERR(":kzalloc err.\n");
+		result = -ENOMEM;
+		goto fail_mem;
+	}
+
+	IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
+	ipa_ctx->polling_mode = polling_mode;
+	IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
+	       hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
+	       ip6_flt_tbl_lcl);
+	ipa_ctx->hdr_tbl_lcl = hdr_tbl_lcl;
+	ipa_ctx->ip4_rt_tbl_lcl = ip4_rt_tbl_lcl;
+	ipa_ctx->ip6_rt_tbl_lcl = ip6_rt_tbl_lcl;
+	ipa_ctx->ip4_flt_tbl_lcl = ip4_flt_tbl_lcl;
+	ipa_ctx->ip6_flt_tbl_lcl = ip6_flt_tbl_lcl;
+
+	ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+
+	/* setup IPA register access */
+	ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base + IPA_REG_BASE_OFST,
+			resource_p->ipa_mem_size);
+	if (!ipa_ctx->mmio) {
+		IPAERR(":ipa-base ioremap err.\n");
+		result = -EFAULT;
+		goto fail_remap;
+	}
+	/* do POR programming to setup HW */
+	result = ipa_init_hw();
+	if (result) {
+		IPAERR(":error initializing driver.\n");
+		result = -ENODEV;
+		goto fail_init_hw;
+	}
+	/* read how much SRAM is available for SW use */
+	ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
+			IPA_SHARED_MEM_SIZE_OFST);
+
+	if (IPA_RAM_END_OFST > ipa_ctx->smem_sz) {
+		IPAERR("SW expect more core memory, needed %d, avail %d\n",
+				IPA_RAM_END_OFST, ipa_ctx->smem_sz);
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+	/* register IPA with SPS driver */
+	bam_props.phys_addr = resource_p->bam_mem_base;
+	bam_props.virt_addr = ioremap(resource_p->bam_mem_base,
+			resource_p->bam_mem_size);
+	if (!bam_props.virt_addr) {
+		IPAERR(":bam-base ioremap err.\n");
+		result = -EFAULT;
+		goto fail_bam_remap;
+	}
+	bam_props.virt_size = resource_p->bam_mem_size;
+	bam_props.irq = resource_p->bam_irq;
+	bam_props.num_pipes = IPA_NUM_PIPES;
+	bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+	bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+
+	result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
+	if (result) {
+		IPAERR(":bam register err.\n");
+		result = -ENODEV;
+		goto fail_bam_register;
+	}
+
+	if (ipa_setup_bam_cfg(resource_p)) {
+		IPAERR(":bam cfg err.\n");
+		result = -ENODEV;
+		goto fail_flt_rule_cache;
+	}
+
+	/* set up the default op mode */
+	ipa_ctx->mode = IPA_MODE_USB_DONGLE;
+
+	/* init the lookaside cache */
+	ipa_ctx->flt_rule_cache = kmem_cache_create("IPA FLT",
+			sizeof(struct ipa_flt_entry), 0, 0, NULL);
+	if (!ipa_ctx->flt_rule_cache) {
+		IPAERR(":ipa flt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_flt_rule_cache;
+	}
+	ipa_ctx->rt_rule_cache = kmem_cache_create("IPA RT",
+			sizeof(struct ipa_rt_entry), 0, 0, NULL);
+	if (!ipa_ctx->rt_rule_cache) {
+		IPAERR(":ipa rt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_rule_cache;
+	}
+	ipa_ctx->hdr_cache = kmem_cache_create("IPA HDR",
+			sizeof(struct ipa_hdr_entry), 0, 0, NULL);
+	if (!ipa_ctx->hdr_cache) {
+		IPAERR(":ipa hdr cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_cache;
+	}
+	ipa_ctx->hdr_offset_cache =
+	   kmem_cache_create("IPA HDR OFF", sizeof(struct ipa_hdr_offset_entry),
+			   0, 0, NULL);
+	if (!ipa_ctx->hdr_offset_cache) {
+		IPAERR(":ipa hdr off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_offset_cache;
+	}
+	ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA RT TBL",
+			sizeof(struct ipa_rt_tbl), 0, 0, NULL);
+	if (!ipa_ctx->rt_tbl_cache) {
+		IPAERR(":ipa rt tbl cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_tbl_cache;
+	}
+	ipa_ctx->tx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA TX PKT WRAPPER",
+			   sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa_ctx->tx_pkt_wrapper_cache) {
+		IPAERR(":ipa tx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_tx_pkt_wrapper_cache;
+	}
+	ipa_ctx->rx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA RX PKT WRAPPER",
+			   sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa_ctx->rx_pkt_wrapper_cache) {
+		IPAERR(":ipa rx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rx_pkt_wrapper_cache;
+	}
+	ipa_ctx->tree_node_cache =
+	   kmem_cache_create("IPA TREE", sizeof(struct ipa_tree_node), 0, 0,
+			   NULL);
+	if (!ipa_ctx->tree_node_cache) {
+		IPAERR(":ipa tree node cache create failed\n");
+		result = -ENOMEM;
+		goto fail_tree_node_cache;
+	}
+
+	/*
+	 * setup DMA pool 4 byte aligned, don't cross 1k boundaries, nominal
+	 * size 512 bytes
+	 */
+	ipa_ctx->one_kb_no_straddle_pool = dma_pool_create("ipa_1k", NULL,
+			IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
+			IPA_DMA_POOL_BOUNDARY);
+	if (!ipa_ctx->one_kb_no_straddle_pool) {
+		IPAERR("cannot setup 1kb alloc DMA pool.\n");
+		result = -ENOMEM;
+		goto fail_dma_pool;
+	}
+
+	ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+	ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+
+	/* init the various list heads */
+	INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list);
+	INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list);
+	INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list);
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+	INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+
+		flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+	}
+
+	rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+	rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+	mutex_init(&ipa_ctx->lock);
+	mutex_init(&ipa_ctx->nat_mem.lock);
+
+	for (i = 0; i < IPA_A5_SYS_MAX; i++) {
+		INIT_LIST_HEAD(&ipa_ctx->sys[i].head_desc_list);
+		spin_lock_init(&ipa_ctx->sys[i].spinlock);
+		if (i != IPA_A5_WLAN_AMPDU_OUT)
+			ipa_ctx->sys[i].ep = &ipa_ctx->ep[i];
+		else
+			ipa_ctx->sys[i].ep = &ipa_ctx->ep[WLAN_AMPDU_TX_EP];
+		INIT_LIST_HEAD(&ipa_ctx->sys[i].wait_desc_list);
+	}
+
+	ipa_ctx->rx_wq = create_singlethread_workqueue("ipa rx wq");
+	if (!ipa_ctx->rx_wq) {
+		IPAERR(":fail to create rx wq\n");
+		result = -ENOMEM;
+		goto fail_rx_wq;
+	}
+
+	ipa_ctx->tx_wq = create_singlethread_workqueue("ipa tx wq");
+	if (!ipa_ctx->tx_wq) {
+		IPAERR(":fail to create tx wq\n");
+		result = -ENOMEM;
+		goto fail_tx_wq;
+	}
+
+	ipa_ctx->hdr_hdl_tree = RB_ROOT;
+	ipa_ctx->rt_rule_hdl_tree = RB_ROOT;
+	ipa_ctx->rt_tbl_hdl_tree = RB_ROOT;
+	ipa_ctx->flt_rule_hdl_tree = RB_ROOT;
+
+	atomic_set(&ipa_ctx->ipa_active_clients, 0);
+
+	result = ipa_bridge_init();
+	if (result) {
+		IPAERR("ipa bridge init err.\n");
+		result = -ENODEV;
+		goto fail_bridge_init;
+	}
+
+	/* setup the A5-IPA pipes */
+	if (ipa_setup_a5_pipes()) {
+		IPAERR(":failed to setup IPA-A5 pipes.\n");
+		result = -ENODEV;
+		goto fail_a5_pipes;
+	}
+
+	ipa_replenish_rx_cache();
+
+	/* init the filtering block */
+	ipa_commit_flt(IPA_IP_v4);
+	ipa_commit_flt(IPA_IP_v6);
+
+	/*
+	 * setup an empty routing table in system memory, this will be used
+	 * to delete a routing table cleanly and safely
+	 */
+	ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;
+
+	ipa_ctx->empty_rt_tbl_mem.base =
+		dma_alloc_coherent(NULL, ipa_ctx->empty_rt_tbl_mem.size,
+				    &ipa_ctx->empty_rt_tbl_mem.phys_base,
+				    GFP_KERNEL);
+	if (!ipa_ctx->empty_rt_tbl_mem.base) {
+		IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
+				ipa_ctx->empty_rt_tbl_mem.size);
+		result = -ENOMEM;
+		goto fail_empty_rt_tbl;
+	}
+	memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
+			ipa_ctx->empty_rt_tbl_mem.size);
+
+	/* setup the IPA pipe mem pool */
+	ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+			resource_p->ipa_pipe_mem_size);
+
+	ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+
+	result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num,
+			ipa_ctx, DRV_NAME);
+	if (IS_ERR(ipa_ctx->dev)) {
+		IPAERR(":device_create err.\n");
+		result = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&ipa_ctx->cdev, &ipa_drv_fops);
+	ipa_ctx->cdev.owner = THIS_MODULE;
+	ipa_ctx->cdev.ops = &ipa_drv_fops;  /* from LDD3 */
+
+	result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1);
+	if (result) {
+		IPAERR(":cdev_add err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_cdev_add;
+	}
+
+	/* default aggregation parameters */
+	ipa_ctx->aggregation_type = IPA_MBIM_16;
+	ipa_ctx->aggregation_byte_limit = 1;
+	ipa_ctx->aggregation_time_limit = 0;
+	IPADBG(":IPA driver init OK.\n");
+
+	/* gate IPA clocks */
+	ipa_disable_clks();
+
+	return 0;
+
+fail_cdev_add:
+	device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(ipa_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	if (ipa_ctx->pipe_mem_pool)
+		gen_pool_destroy(ipa_ctx->pipe_mem_pool);
+	dma_free_coherent(NULL,
+			  ipa_ctx->empty_rt_tbl_mem.size,
+			  ipa_ctx->empty_rt_tbl_mem.base,
+			  ipa_ctx->empty_rt_tbl_mem.phys_base);
+fail_empty_rt_tbl:
+	ipa_cleanup_rx();
+	ipa_teardown_a5_pipes();
+fail_a5_pipes:
+	ipa_bridge_cleanup();
+fail_bridge_init:
+	destroy_workqueue(ipa_ctx->tx_wq);
+fail_tx_wq:
+	destroy_workqueue(ipa_ctx->rx_wq);
+fail_rx_wq:
+	dma_pool_destroy(ipa_ctx->one_kb_no_straddle_pool);
+fail_dma_pool:
+	kmem_cache_destroy(ipa_ctx->tree_node_cache);
+fail_tree_node_cache:
+	kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+	kmem_cache_destroy(ipa_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+	kmem_cache_destroy(ipa_ctx->hdr_cache);
+fail_hdr_cache:
+	kmem_cache_destroy(ipa_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+	kmem_cache_destroy(ipa_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+	sps_deregister_bam_device(ipa_ctx->bam_handle);
+fail_bam_register:
+	iounmap(bam_props.virt_addr);
+fail_bam_remap:
+fail_init_hw:
+	iounmap(ipa_ctx->mmio);
+fail_remap:
+	kfree(ipa_ctx);
+	ipa_ctx = NULL;
+fail_mem:
+	/* gate IPA clocks */
+	ipa_disable_clks();
+	return result;
+}
+
+static int ipa_plat_drv_probe(struct platform_device *pdev_p)
+{
+	int result = 0;
+	struct resource *resource_p;
+
+	IPADBG("IPA plat drv probe\n");
+
+	/* initialize ipa_res */
+	ipa_res.ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+	ipa_res.ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+
+	result = ipa_load_pipe_connection(pdev_p,
+					A2_TO_IPA,
+					&ipa_res.a2_to_ipa_pipe);
+	if (result)
+		IPAERR(":ipa_load_pipe_connection failed!\n");
+
+	result = ipa_load_pipe_connection(pdev_p, IPA_TO_A2,
+					  &ipa_res.ipa_to_a2_pipe);
+	if (result)
+		IPAERR(":ipa_load_pipe_connection failed!\n");
+
+	/* Get IPA wrapper address */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"ipa-base");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for ipa-base!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.ipa_mem_base = resource_p->start;
+		ipa_res.ipa_mem_size = resource_size(resource_p);
+	}
+
+	/* Get IPA BAM address */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"bam-base");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for bam-base!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.bam_mem_base = resource_p->start;
+		ipa_res.bam_mem_size = resource_size(resource_p);
+	}
+
+	/* Get IPA pipe mem start ofst */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"ipa-pipe-mem");
+
+	if (!resource_p) {
+		IPADBG(":get resource failed for ipa-pipe-mem\n");
+	} else {
+		ipa_res.ipa_pipe_mem_start_ofst = resource_p->start;
+		ipa_res.ipa_pipe_mem_size = resource_size(resource_p);
+	}
+
+	/* Get IPA IRQ number */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+			"ipa-irq");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for ipa-irq!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.ipa_irq = resource_p->start;
+	}
+
+	/* Get IPA BAM IRQ number */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+			"bam-irq");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for bam-irq!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.bam_irq = resource_p->start;
+	}
+
+	IPADBG(":ipa_mem_base = 0x%x, ipa_mem_size = 0x%x\n",
+	       ipa_res.ipa_mem_base, ipa_res.ipa_mem_size);
+	IPADBG(":bam_mem_base = 0x%x, bam_mem_size = 0x%x\n",
+	       ipa_res.bam_mem_base, ipa_res.bam_mem_size);
+	IPADBG(":pipe_mem_start_ofst = 0x%x, pipe_mem_size = 0x%x\n",
+	       ipa_res.ipa_pipe_mem_start_ofst, ipa_res.ipa_pipe_mem_size);
+
+	IPADBG(":ipa_irq = %d\n", ipa_res.ipa_irq);
+	IPADBG(":bam_irq = %d\n", ipa_res.bam_irq);
+
+	/* stash the IPA dev ptr */
+	ipa_dev = &pdev_p->dev;
+
+	/* get IPA clocks */
+	if (ipa_get_clks(ipa_dev) != 0)
+		return -ENODEV;
+
+	/* enable IPA clocks */
+	ipa_enable_clks();
+
+	/* Proceed to real initialization */
+	result = ipa_init(&ipa_res);
+	if (result)
+		IPAERR("ipa_init failed\n");
+
+	result = device_create_file(&pdev_p->dev,
+			&dev_attr_aggregation_type);
+	if (result)
+		IPAERR("failed to create device file\n");
+
+	result = device_create_file(&pdev_p->dev,
+			&dev_attr_aggregation_byte_limit);
+	if (result)
+		IPAERR("failed to create device file\n");
+
+	result = device_create_file(&pdev_p->dev,
+			&dev_attr_aggregation_time_limit);
+	if (result)
+		IPAERR("failed to create device file\n");
+
+	return result;
+}
+
+static struct platform_driver ipa_plat_drv = {
+	.probe = ipa_plat_drv_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ipa_plat_drv_match,
+	},
+};
+
+static int ipa_plat_drv_init(void)
+{
+	return platform_driver_register(&ipa_plat_drv);
+}
+
+struct ipa_context *ipa_get_ctx(void)
+{
+	return ipa_ctx;
+}
+
+static int __init ipa_module_init(void)
+{
+	int result = 0;
+
+	IPADBG("IPA module init\n");
+	ipa_debugfs_init();
+	/* Register as a platform device driver */
+	result = ipa_plat_drv_init();
+
+	return result;
+}
+
+late_initcall(ipa_module_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
+
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
new file mode 100644
index 0000000..cf51ab6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -0,0 +1,789 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/ratelimit.h>
+#include "ipa_i.h"
+
+enum ipa_bridge_id {
+	IPA_DL_FROM_A2,
+	IPA_DL_TO_IPA,
+	IPA_UL_FROM_IPA,
+	IPA_UL_TO_A2,
+	IPA_BRIDGE_ID_MAX
+};
+
+static int polling_min_sleep[IPA_DIR_MAX] = { 950, 950 };
+static int polling_max_sleep[IPA_DIR_MAX] = { 1050, 1050 };
+static int polling_inactivity[IPA_DIR_MAX] = { 20, 20 };
+
+struct ipa_pkt_info {
+	void *buffer;
+	dma_addr_t dma_address;
+	uint32_t len;
+	struct list_head list_node;
+};
+
+struct ipa_bridge_pipe_context {
+	struct list_head head_desc_list;
+	struct sps_pipe *pipe;
+	struct sps_connect connection;
+	struct sps_mem_buffer desc_mem_buf;
+	struct sps_register_event register_event;
+	spinlock_t spinlock;
+	u32 len;
+	u32 free_len;
+	struct list_head free_desc_list;
+};
+
+static struct ipa_bridge_pipe_context bridge[IPA_BRIDGE_ID_MAX];
+
+static struct workqueue_struct *ipa_ul_workqueue;
+static struct workqueue_struct *ipa_dl_workqueue;
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir);
+
+static u32 alloc_cnt[IPA_DIR_MAX];
+
+static void ul_work_func(struct work_struct *work)
+{
+	ipa_do_bridge_work(IPA_UL);
+}
+
+static void dl_work_func(struct work_struct *work)
+{
+	ipa_do_bridge_work(IPA_DL);
+}
+
+static DECLARE_WORK(ul_work, ul_work_func);
+static DECLARE_WORK(dl_work, dl_work_func);
+
+static int ipa_switch_to_intr_mode(enum ipa_bridge_dir dir)
+{
+	int ret;
+	struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];
+
+	ret = sps_get_config(sys->pipe, &sys->connection);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		goto fail;
+	}
+	sys->register_event.options = SPS_O_EOT;
+	ret = sps_register_event(sys->pipe, &sys->register_event);
+	if (ret) {
+		IPAERR("sps_register_event() failed %d\n", ret);
+		goto fail;
+	}
+	sys->connection.options =
+	   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+	ret = sps_set_config(sys->pipe, &sys->connection);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		goto fail;
+	}
+	ret = 0;
+fail:
+	return ret;
+}
+
+static int ipa_switch_to_poll_mode(enum ipa_bridge_dir dir)
+{
+	int ret;
+	struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];
+
+	ret = sps_get_config(sys->pipe, &sys->connection);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		goto fail;
+	}
+	sys->connection.options =
+	   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	ret = sps_set_config(sys->pipe, &sys->connection);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		goto fail;
+	}
+	ret = 0;
+fail:
+	return ret;
+}
+
+static int queue_rx_single(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+	struct ipa_pkt_info *info;
+	int ret;
+
+	info = kmalloc(sizeof(struct ipa_pkt_info), GFP_KERNEL);
+	if (!info) {
+		IPAERR("unable to alloc rx_pkt_info\n");
+		goto fail_pkt;
+	}
+
+	info->buffer = kmalloc(IPA_RX_SKB_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!info->buffer) {
+		IPAERR("unable to alloc rx_pkt_buffer\n");
+		goto fail_buffer;
+	}
+
+	info->dma_address = dma_map_single(NULL, info->buffer, IPA_RX_SKB_SIZE,
+					   DMA_BIDIRECTIONAL);
+	if (info->dma_address == 0 || info->dma_address == ~0) {
+		IPAERR("dma_map_single failure %p for %p\n",
+				(void *)info->dma_address, info->buffer);
+		goto fail_dma;
+	}
+
+	info->len = ~0;
+
+	list_add_tail(&info->list_node, &sys_rx->head_desc_list);
+	ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
+			       IPA_RX_SKB_SIZE, info,
+			       SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+	if (ret) {
+		list_del(&info->list_node);
+		dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_BIDIRECTIONAL);
+		IPAERR("sps_transfer_one failed %d\n", ret);
+		goto fail_dma;
+	}
+	sys_rx->len++;
+	return 0;
+
+fail_dma:
+	kfree(info->buffer);
+fail_buffer:
+	kfree(info);
+fail_pkt:
+	IPAERR("failed\n");
+	return -ENOMEM;
+}
+
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+	struct ipa_bridge_pipe_context *sys_tx = &bridge[2 * dir + 1];
+	struct ipa_pkt_info *tx_pkt;
+	struct ipa_pkt_info *rx_pkt;
+	struct ipa_pkt_info *tmp_pkt;
+	struct sps_iovec iov;
+	int ret;
+	int inactive_cycles = 0;
+
+	while (1) {
+		++inactive_cycles;
+		iov.addr = 0;
+		ret = sps_get_iovec(sys_tx->pipe, &iov);
+		if (ret || iov.addr == 0) {
+			/* no-op */
+		} else {
+			inactive_cycles = 0;
+
+			tx_pkt = list_first_entry(&sys_tx->head_desc_list,
+						  struct ipa_pkt_info,
+						  list_node);
+			list_move_tail(&tx_pkt->list_node,
+					&sys_tx->free_desc_list);
+			sys_tx->len--;
+			sys_tx->free_len++;
+			tx_pkt->len = ~0;
+		}
+
+		iov.addr = 0;
+		ret = sps_get_iovec(sys_rx->pipe, &iov);
+		if (ret || iov.addr == 0) {
+			/* no-op */
+		} else {
+			inactive_cycles = 0;
+
+			rx_pkt = list_first_entry(&sys_rx->head_desc_list,
+						  struct ipa_pkt_info,
+						  list_node);
+			list_del(&rx_pkt->list_node);
+			sys_rx->len--;
+			rx_pkt->len = iov.size;
+
+retry_alloc_tx:
+			if (list_empty(&sys_tx->free_desc_list)) {
+				tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
+						GFP_KERNEL);
+				if (!tmp_pkt) {
+					pr_err_ratelimited("%s: unable to alloc tx_pkt_info\n",
+					       __func__);
+					usleep_range(polling_min_sleep[dir],
+							polling_max_sleep[dir]);
+					goto retry_alloc_tx;
+				}
+
+				tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
+						GFP_KERNEL | GFP_DMA);
+				if (!tmp_pkt->buffer) {
+					pr_err_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
+					       __func__);
+					kfree(tmp_pkt);
+					usleep_range(polling_min_sleep[dir],
+							polling_max_sleep[dir]);
+					goto retry_alloc_tx;
+				}
+
+				tmp_pkt->dma_address = dma_map_single(NULL,
+						tmp_pkt->buffer,
+						IPA_RX_SKB_SIZE,
+						DMA_BIDIRECTIONAL);
+				if (tmp_pkt->dma_address == 0 ||
+						tmp_pkt->dma_address == ~0) {
+					pr_err_ratelimited("%s: dma_map_single failure %p for %p\n",
+					       __func__,
+					       (void *)tmp_pkt->dma_address,
+					       tmp_pkt->buffer);
+					/* don't queue a buffer that failed to map */
+					kfree(tmp_pkt->buffer);
+					kfree(tmp_pkt);
+					usleep_range(polling_min_sleep[dir],
+							polling_max_sleep[dir]);
+					goto retry_alloc_tx;
+				}
+
+				list_add_tail(&tmp_pkt->list_node,
+						&sys_tx->free_desc_list);
+				sys_tx->free_len++;
+				alloc_cnt[dir]++;
+
+				tmp_pkt->len = ~0;
+			}
+
+			tx_pkt = list_first_entry(&sys_tx->free_desc_list,
+						  struct ipa_pkt_info,
+						  list_node);
+			list_del(&tx_pkt->list_node);
+			sys_tx->free_len--;
+
+retry_add_rx:
+			list_add_tail(&tx_pkt->list_node,
+					&sys_rx->head_desc_list);
+			ret = sps_transfer_one(sys_rx->pipe,
+					tx_pkt->dma_address,
+					IPA_RX_SKB_SIZE,
+					tx_pkt,
+					SPS_IOVEC_FLAG_INT |
+					SPS_IOVEC_FLAG_EOT);
+			if (ret) {
+				list_del(&tx_pkt->list_node);
+				pr_err_ratelimited("%s: sps_transfer_one failed %d\n",
+						__func__, ret);
+				usleep_range(polling_min_sleep[dir],
+						polling_max_sleep[dir]);
+				goto retry_add_rx;
+			}
+			sys_rx->len++;
+
+retry_add_tx:
+			list_add_tail(&rx_pkt->list_node,
+					&sys_tx->head_desc_list);
+			ret = sps_transfer_one(sys_tx->pipe,
+					       rx_pkt->dma_address,
+					       iov.size,
+					       rx_pkt,
+					       SPS_IOVEC_FLAG_INT |
+					       SPS_IOVEC_FLAG_EOT);
+			if (ret) {
+				pr_err_ratelimited("%s: fail to add to TX dir=%d\n",
+						__func__, dir);
+				list_del(&rx_pkt->list_node);
+				usleep_range(polling_min_sleep[dir],
+						polling_max_sleep[dir]);
+				goto retry_add_tx;
+			}
+			sys_tx->len++;
+		}
+
+		if (inactive_cycles >= polling_inactivity[dir]) {
+			ipa_switch_to_intr_mode(dir);
+			break;
+		}
+	}
+}
+
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		ipa_switch_to_poll_mode(IPA_UL);
+		queue_work(ipa_ul_workqueue, &ul_work);
+		break;
+	default:
+		IPAERR("recieved unexpected event id %d\n", notify->event_id);
+	}
+}
+
+static int setup_bridge_to_ipa(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys;
+	struct ipa_ep_cfg_mode mode;
+	dma_addr_t dma_addr;
+	int ipa_ep_idx;
+	int ret;
+	int i;
+
+	if (dir == IPA_DL) {
+		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+				IPA_CLIENT_A2_TETHERED_PROD);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			ret = -EINVAL;
+			goto tx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_DL_TO_IPA];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("tx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto tx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("tx get config failed %d\n", ret);
+			goto tx_get_config_failed;
+		}
+
+		sys->connection.source = SPS_DEV_HANDLE_MEM;
+		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.destination = ipa_ctx->bam_handle;
+		sys->connection.dest_pipe_index = ipa_ep_idx;
+		sys->connection.mode = SPS_MODE_DEST;
+		sys->connection.options =
+		   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("tx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto tx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("tx connect error %d\n", ret);
+			goto tx_connect_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		ipa_ctx->ep[ipa_ep_idx].valid = 1;
+
+		mode.mode = IPA_DMA;
+		mode.dst = IPA_CLIENT_USB_CONS;
+		ret = ipa_cfg_ep_mode(ipa_ep_idx, &mode);
+		if (ret < 0) {
+			IPAERR("DMA mode set error %d\n", ret);
+			goto tx_mode_set_failed;
+		}
+
+		return 0;
+
+tx_mode_set_failed:
+		sps_disconnect(sys->pipe);
+tx_connect_failed:
+		dma_free_coherent(NULL, sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+		return ret;
+	} else {
+
+		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+				IPA_CLIENT_A2_TETHERED_CONS);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			ret = -EINVAL;
+			goto rx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_UL_FROM_IPA];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("rx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto rx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("rx get config failed %d\n", ret);
+			goto rx_get_config_failed;
+		}
+
+		sys->connection.source = ipa_ctx->bam_handle;
+		sys->connection.src_pipe_index = 7;
+		sys->connection.destination = SPS_DEV_HANDLE_MEM;
+		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.mode = SPS_MODE_SRC;
+		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+		      SPS_O_ACK_TRANSFERS;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("rx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto rx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("rx connect error %d\n", ret);
+			goto rx_connect_failed;
+		}
+
+		sys->register_event.options = SPS_O_EOT;
+		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+		sys->register_event.xfer_done = NULL;
+		sys->register_event.callback = ipa_rx_notify;
+		sys->register_event.user = NULL;
+		ret = sps_register_event(sys->pipe, &sys->register_event);
+		if (ret < 0) {
+			IPAERR("tx register event error %d\n", ret);
+			goto rx_event_reg_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
+			ret = queue_rx_single(dir);
+			if (ret < 0)
+				IPAERR("queue fail %d %d\n", dir, i);
+		}
+
+		return 0;
+
+rx_event_reg_failed:
+		sps_disconnect(sys->pipe);
+rx_connect_failed:
+		dma_free_coherent(NULL,
+				sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+		return ret;
+	}
+}
+
+static void bam_mux_rx_notify(struct sps_event_notify *notify)
+{
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		ipa_switch_to_poll_mode(IPA_DL);
+		queue_work(ipa_dl_workqueue, &dl_work);
+		break;
+	default:
+		IPAERR("recieved unexpected event id %d\n", notify->event_id);
+	}
+}
+
+static int setup_bridge_to_a2(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys;
+	struct a2_mux_pipe_connection pipe_conn = { 0, };
+	dma_addr_t dma_addr;
+	u32 a2_handle;
+	int ret;
+	int i;
+
+	if (dir == IPA_UL) {
+		ret = ipa_get_a2_mux_pipe_info(IPA_TO_A2, &pipe_conn);
+		if (ret) {
+			IPAERR("ipa_get_a2_mux_pipe_info failed IPA_TO_A2\n");
+			goto tx_alloc_endpoint_failed;
+		}
+
+		ret = sps_phy2h(pipe_conn.dst_phy_addr, &a2_handle);
+		if (ret) {
+			IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+			goto tx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_UL_TO_A2];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("tx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto tx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("tx get config failed %d\n", ret);
+			goto tx_get_config_failed;
+		}
+
+		sys->connection.source = SPS_DEV_HANDLE_MEM;
+		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.destination = a2_handle;
+		sys->connection.dest_pipe_index = pipe_conn.dst_pipe_index;
+		sys->connection.mode = SPS_MODE_DEST;
+		sys->connection.options =
+		   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("tx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto tx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("tx connect error %d\n", ret);
+			goto tx_connect_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		return 0;
+
+tx_connect_failed:
+		dma_free_coherent(NULL,
+				sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+		return ret;
+	} else { /* dir == IPA_DL */
+
+		ret = ipa_get_a2_mux_pipe_info(A2_TO_IPA, &pipe_conn);
+		if (ret) {
+			IPAERR("ipa_get_a2_mux_pipe_info failed A2_TO_IPA\n");
+			goto rx_alloc_endpoint_failed;
+		}
+
+		ret = sps_phy2h(pipe_conn.src_phy_addr, &a2_handle);
+		if (ret) {
+			IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+			goto rx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_DL_FROM_A2];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("rx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto rx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("rx get config failed %d\n", ret);
+			goto rx_get_config_failed;
+		}
+
+		sys->connection.source = a2_handle;
+		sys->connection.src_pipe_index = pipe_conn.src_pipe_index;
+		sys->connection.destination = SPS_DEV_HANDLE_MEM;
+		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.mode = SPS_MODE_SRC;
+		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+		      SPS_O_ACK_TRANSFERS;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("rx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto rx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("rx connect error %d\n", ret);
+			goto rx_connect_failed;
+		}
+
+		sys->register_event.options = SPS_O_EOT;
+		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+		sys->register_event.xfer_done = NULL;
+		sys->register_event.callback = bam_mux_rx_notify;
+		sys->register_event.user = NULL;
+		ret = sps_register_event(sys->pipe, &sys->register_event);
+		if (ret < 0) {
+			IPAERR("tx register event error %d\n", ret);
+			goto rx_event_reg_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
+			ret = queue_rx_single(dir);
+			if (ret < 0)
+				IPAERR("queue fail %d %d\n", dir, i);
+		}
+
+		return 0;
+
+rx_event_reg_failed:
+		sps_disconnect(sys->pipe);
+rx_connect_failed:
+		dma_free_coherent(NULL,
+				sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+		return ret;
+	}
+}
+
+/**
+ * ipa_bridge_init() - initialize the tethered bridge, allocate UL and DL
+ * workqueues
+ *
+ * Return codes: 0: success, -ENOMEM: failure
+ */
+int ipa_bridge_init(void)
+{
+	int ret;
+
+	ipa_ul_workqueue = alloc_workqueue("ipa_ul",
+			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	if (!ipa_ul_workqueue) {
+		IPAERR("ipa ul wq alloc failed\n");
+		ret = -ENOMEM;
+		goto fail_ul;
+	}
+
+	ipa_dl_workqueue = alloc_workqueue("ipa_dl",
+			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	if (!ipa_dl_workqueue) {
+		IPAERR("ipa dl wq alloc failed\n");
+		ret = -ENOMEM;
+		goto fail_dl;
+	}
+
+	return 0;
+fail_dl:
+	destroy_workqueue(ipa_ul_workqueue);
+fail_ul:
+	return ret;
+}
+
+/**
+ * ipa_bridge_setup() - setup tethered SW bridge in specified direction
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Return codes:
+ * 0: success
+ * various negative error codes on errors
+ */
+int ipa_bridge_setup(enum ipa_bridge_dir dir)
+{
+	int ret;
+
+	if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+		ipa_enable_clks();
+
+	if (setup_bridge_to_a2(dir)) {
+		IPAERR("fail to setup SYS pipe to A2 %d\n", dir);
+		ret = -EINVAL;
+		goto bail_a2;
+	}
+
+	if (setup_bridge_to_ipa(dir)) {
+		IPAERR("fail to setup SYS pipe to IPA %d\n", dir);
+		ret = -EINVAL;
+		goto bail_ipa;
+	}
+
+	return 0;
+
+bail_ipa:
+	if (dir == IPA_UL)
+		sps_disconnect(bridge[IPA_UL_TO_A2].pipe);
+	else
+		sps_disconnect(bridge[IPA_DL_FROM_A2].pipe);
+bail_a2:
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+	return ret;
+}
+
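+/*
+ * Sketch of a possible caller (the caller itself is not part of this patch)
+ * bringing up the tethered bridge in both directions and unwinding on
+ * failure:
+ *
+ *	if (ipa_bridge_setup(IPA_DL))
+ *		return -EPERM;
+ *	if (ipa_bridge_setup(IPA_UL)) {
+ *		ipa_bridge_teardown(IPA_DL);
+ *		return -EPERM;
+ *	}
+ */
+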
+/**
+ * ipa_bridge_teardown() - teardown the tethered bridge in the specified dir
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Return codes:
+ * 0: always
+ */
+int ipa_bridge_teardown(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys;
+
+	if (dir == IPA_UL) {
+		sys = &bridge[IPA_UL_TO_A2];
+		sps_disconnect(sys->pipe);
+		sys = &bridge[IPA_UL_FROM_IPA];
+		sps_disconnect(sys->pipe);
+	} else {
+		sys = &bridge[IPA_DL_FROM_A2];
+		sps_disconnect(sys->pipe);
+		sys = &bridge[IPA_DL_TO_IPA];
+		sps_disconnect(sys->pipe);
+	}
+
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+
+	return 0;
+}
+
+/**
+ * ipa_bridge_cleanup() - de-initialize the tethered bridge
+ *
+ * Return codes:
+ * None
+ */
+void ipa_bridge_cleanup(void)
+{
+	destroy_workqueue(ipa_dl_workqueue);
+	destroy_workqueue(ipa_ul_workqueue);
+}
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
new file mode 100644
index 0000000..823b17d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -0,0 +1,325 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
+				     struct ipa_ep_context *ep, int ipa_ep_idx)
+{
+	int result = -EFAULT;
+
+	/* Default Config */
+	ep->ep_hdl = sps_alloc_endpoint();
+
+	if (ep->ep_hdl == NULL) {
+		IPAERR("SPS EP alloc failed EP.\n");
+		return -EFAULT;
+	}
+
+	result = sps_get_config(ep->ep_hdl,
+		&ep->connect);
+	if (result) {
+		IPAERR("fail to get config.\n");
+		return -EFAULT;
+	}
+
+	/* Specific Config */
+	if (IPA_CLIENT_IS_CONS(in->client)) {
+		ep->connect.mode = SPS_MODE_SRC;
+		ep->connect.destination =
+			in->client_bam_hdl;
+		ep->connect.source = ipa_ctx->bam_handle;
+		ep->connect.dest_pipe_index =
+			in->client_ep_idx;
+		ep->connect.src_pipe_index = ipa_ep_idx;
+	} else {
+		ep->connect.mode = SPS_MODE_DEST;
+		ep->connect.source = in->client_bam_hdl;
+		ep->connect.destination = ipa_ctx->bam_handle;
+		ep->connect.src_pipe_index = in->client_ep_idx;
+		ep->connect.dest_pipe_index = ipa_ep_idx;
+	}
+
+	return 0;
+}
+
+static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
+				     struct sps_mem_buffer *mem_buff_ptr,
+				     bool *fifo_in_pipe_mem_ptr,
+				     u32 *fifo_pipe_mem_ofst_ptr,
+				     u32 fifo_size, int ipa_ep_idx)
+{
+	dma_addr_t dma_addr;
+	u32 ofst;
+	int result = -EFAULT;
+
+	mem_buff_ptr->size = fifo_size;
+	if (in->pipe_mem_preferred) {
+		if (ipa_pipe_mem_alloc(&ofst, fifo_size)) {
+			IPAERR("FIFO pipe mem alloc fail ep %u\n",
+				ipa_ep_idx);
+			mem_buff_ptr->base =
+				dma_alloc_coherent(NULL,
+				mem_buff_ptr->size,
+				&dma_addr, GFP_KERNEL);
+		} else {
+			memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+			result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+				fifo_size, 1);
+			WARN_ON(result);
+			*fifo_in_pipe_mem_ptr = 1;
+			dma_addr = mem_buff_ptr->phys_base;
+			*fifo_pipe_mem_ofst_ptr = ofst;
+		}
+	} else {
+		mem_buff_ptr->base =
+			dma_alloc_coherent(NULL, mem_buff_ptr->size,
+			&dma_addr, GFP_KERNEL);
+	}
+	mem_buff_ptr->phys_base = dma_addr;
+	if (mem_buff_ptr->base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+
+/**
+ * ipa_connect() - low-level IPA client connect
+ * @in:	[in] input parameters from client
+ * @sps:	[out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode; such peripherals are A2, USB and HSIC. This API
+ * expects the caller to take responsibility for adding any needed headers,
+ * routing and filtering tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+		u32 *clnt_hdl)
+{
+	int ipa_ep_idx;
+	int ipa_ep_idx_dst;
+	int result = -EFAULT;
+	struct ipa_ep_context *ep;
+
+	if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+		ipa_enable_clks();
+
+	if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+	    in->client >= IPA_CLIENT_MAX ||
+	    in->ipa_ep_cfg.mode.dst >= IPA_CLIENT_MAX ||
+	    in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+		IPAERR("bad parm.\n");
+		result = -EINVAL;
+		goto fail;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	if (IPA_CLIENT_IS_PROD(in->client) &&
+			(in->ipa_ep_cfg.mode.mode == IPA_DMA)) {
+		ipa_ep_idx_dst = ipa_get_ep_mapping(ipa_ctx->mode,
+				in->ipa_ep_cfg.mode.dst);
+		if ((ipa_ep_idx_dst == -1) ||
+				(ipa_ctx->ep[ipa_ep_idx_dst].valid)) {
+			IPADBG("dst EP for IPA input pipe doesn't yet exist\n");
+		}
+	}
+
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+	ep->valid = 1;
+	ep->client = in->client;
+	ep->notify = in->notify;
+	ep->priv = in->priv;
+
+	if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+		IPAERR("fail to configure EP.\n");
+		goto ipa_cfg_ep_fail;
+	}
+
+	result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
+	if (result) {
+		IPAERR("fail to configure SPS.\n");
+		goto ipa_cfg_ep_fail;
+	}
+
+	if (in->desc.base == NULL) {
+		result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
+						  &ep->desc_fifo_in_pipe_mem,
+						  &ep->desc_fifo_pipe_mem_ofst,
+						  in->desc_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DESC FIFO.\n");
+			goto desc_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DESC FIFO\n");
+		ep->connect.desc = in->desc;
+		ep->desc_fifo_client_allocated = 1;
+	}
+	IPADBG("Descriptor FIFO pa=0x%x, size=%d\n", ep->connect.desc.phys_base,
+	       ep->connect.desc.size);
+
+	if (in->data.base == NULL) {
+		result = ipa_connect_allocate_fifo(in, &ep->connect.data,
+						&ep->data_fifo_in_pipe_mem,
+						&ep->data_fifo_pipe_mem_ofst,
+						in->data_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DATA FIFO.\n");
+			goto data_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DATA FIFO\n");
+		ep->connect.data = in->data;
+		ep->data_fifo_client_allocated = 1;
+	}
+	IPADBG("Data FIFO pa=0x%x, size=%d\n", ep->connect.data.phys_base,
+	       ep->connect.data.size);
+
+	ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+	ep->connect.options = SPS_O_AUTO_ENABLE;    /* BAM-to-BAM */
+
+	result = sps_connect(ep->ep_hdl, &ep->connect);
+	if (result) {
+		IPAERR("sps_connect fails.\n");
+		goto sps_connect_fail;
+	}
+
+	sps->ipa_bam_hdl = ipa_ctx->bam_handle;
+	sps->ipa_ep_idx = ipa_ep_idx;
+	*clnt_hdl = ipa_ep_idx;
+	memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+	memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+	return 0;
+
+sps_connect_fail:
+	if (!ep->data_fifo_in_pipe_mem)
+		dma_free_coherent(NULL,
+				  ep->connect.data.size,
+				  ep->connect.data.base,
+				  ep->connect.data.phys_base);
+	else
+		ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+				  ep->connect.data.size);
+
+data_mem_alloc_fail:
+	if (!ep->desc_fifo_in_pipe_mem)
+		dma_free_coherent(NULL,
+				  ep->connect.desc.size,
+				  ep->connect.desc.base,
+				  ep->connect.desc.phys_base);
+	else
+		ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+				  ep->connect.desc.size);
+
+desc_mem_alloc_fail:
+	sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail:
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_connect);
+
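+/*
+ * Minimal usage sketch for a peripheral driver; the client enum value, BAM
+ * handle, endpoint index and FIFO sizes below are illustrative assumptions,
+ * not values required by this patch:
+ *
+ *	struct ipa_connect_params in;
+ *	struct ipa_sps_params sps;
+ *	u32 hdl;
+ *
+ *	memset(&in, 0, sizeof(in));
+ *	in.client = IPA_CLIENT_USB_PROD;
+ *	in.client_bam_hdl = usb_bam_hdl;
+ *	in.client_ep_idx = usb_ep_idx;
+ *	in.desc_fifo_sz = 0x400;
+ *	in.data_fifo_sz = 0x1000;
+ *	if (ipa_connect(&in, &sps, &hdl))
+ *		return -ENODEV;
+ *
+ * On success the peripheral driver uses sps.ipa_bam_hdl and sps.ipa_ep_idx
+ * for its own sps_connect() towards the IPA BAM, and keeps hdl for the
+ * matching ipa_disconnect() call.
+ */
+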
+/**
+ * ipa_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take
+ * responsibility for freeing any needed headers, routing and filtering
+ * tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_disconnect(u32 clnt_hdl)
+{
+	int result;
+	struct ipa_ep_context *ep;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	result = sps_disconnect(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS disconnect failed.\n");
+		return -EPERM;
+	}
+
+	if (!ep->desc_fifo_client_allocated &&
+	     ep->connect.desc.base) {
+		if (!ep->desc_fifo_in_pipe_mem)
+			dma_free_coherent(NULL,
+					  ep->connect.desc.size,
+					  ep->connect.desc.base,
+					  ep->connect.desc.phys_base);
+		else
+			ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+					  ep->connect.desc.size);
+	}
+
+	if (!ep->data_fifo_client_allocated &&
+	     ep->connect.data.base) {
+		if (!ep->data_fifo_in_pipe_mem)
+			dma_free_coherent(NULL,
+					  ep->connect.data.size,
+					  ep->connect.data.base,
+					  ep->connect.data.phys_base);
+		else
+			ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+					  ep->connect.data.size);
+	}
+
+	result = sps_free_endpoint(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS de-alloc EP failed.\n");
+		return -EPERM;
+	}
+
+	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_disconnect);
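+
+/*
+ * Sketch of releasing the connection (hdl is the value returned through the
+ * clnt_hdl out parameter of ipa_connect() above):
+ *
+ *	if (ipa_disconnect(hdl))
+ *		IPAERR("disconnect of client %u failed\n", hdl);
+ */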
+
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
new file mode 100644
index 0000000..43b0178d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -0,0 +1,507 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include "ipa_i.h"
+
+
+#define IPA_MAX_MSG_LEN 1024
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip6_flt;
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static s8 ep_reg_idx;
+
+static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_VERSION=0x%x\n"
+			"IPA_COMP_HW_VERSION=0x%x\n"
+			"IPA_ROUTE=0x%x\n"
+			"IPA_FILTER=0x%x\n"
+			"IPA_SHARED_MEM_SIZE=0x%x\n"
+			"IPA_HEAD_OF_LINE_BLOCK_EN=0x%x\n",
+			ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST),
+			ipa_read_reg(ipa_ctx->mmio,
+				IPA_HEAD_OF_LINE_BLOCK_EN_OFST));
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option >= IPA_NUM_PIPES) {
+		IPAERR("bad pipe specified %u\n", option);
+		return count;
+	}
+
+	ep_reg_idx = option;
+
+	return count;
+}
+
+static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int start_idx;
+	int end_idx;
+	int size = 0;
+	int ret;
+	loff_t pos;
+
+	/* negative ep_reg_idx means all registers */
+	if (ep_reg_idx < 0) {
+		start_idx = 0;
+		end_idx = IPA_NUM_PIPES;
+	} else {
+		start_idx = ep_reg_idx;
+		end_idx = start_idx + 1;
+	}
+	pos = *ppos;
+	for (i = start_idx; i < end_idx; i++) {
+
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+				"IPA_ENDP_INIT_HDR_%u=0x%x\n"
+				"IPA_ENDP_INIT_MODE_%u=0x%x\n"
+				"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+				"IPA_ENDP_INIT_ROUTE_%u=0x%x\n",
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_NAT_n_OFST(i)),
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_HDR_n_OFST(i)),
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_MODE_n_OFST(i)),
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_AGGR_n_OFST(i)),
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_ROUTE_n_OFST(i)));
+		*ppos = pos;
+		ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+					      nbytes);
+		if (ret < 0)
+			return ret;
+
+		size += ret;
+		ubuf += nbytes;
+		count -= nbytes;
+	}
+
+	*ppos = pos + size;
+	return size;
+}
+
+static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int cnt = 0;
+	int i = 0;
+	struct ipa_hdr_entry *entry;
+
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				   "name:%s len=%d ref=%d partial=%d lcl=%d ofst=%u ",
+				   entry->name,
+				   entry->hdr_len, entry->ref_cnt,
+				   entry->is_partial,
+				   ipa_ctx->hdr_tbl_lcl,
+				   entry->offset_entry->offset >> 2);
+		for (i = 0; i < entry->hdr_len; i++) {
+			scnprintf(dbg_buff + cnt + nbytes + i * 2,
+				  IPA_MAX_MSG_LEN - cnt - nbytes - i * 2,
+				  "%02x", entry->hdr[i]);
+		}
+		scnprintf(dbg_buff + cnt + nbytes + entry->hdr_len * 2,
+			  IPA_MAX_MSG_LEN - cnt - nbytes - entry->hdr_len * 2,
+			  "\n");
+		cnt += nbytes + entry->hdr_len * 2 + 1;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static int ipa_attrib_dump(char *buff, size_t sz,
+		struct ipa_rule_attrib *attrib, enum ipa_ip_type ip)
+{
+	int nbytes = 0;
+	int cnt = 0;
+	uint32_t addr[4];
+	uint32_t mask[4];
+	int i;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "tos:%d ",
+				attrib->u.v4.tos);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "protocol:%d ",
+				attrib->u.v4.protocol);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.src_addr);
+			mask[0] = htonl(attrib->u.v4.src_addr_mask);
+			nbytes = scnprintf(buff + cnt, sz - cnt,
+					"src_addr:%pI4 src_addr_mask:%pI4 ",
+					addr + 0, mask + 0);
+			cnt += nbytes;
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.src_addr[i]);
+				mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+			}
+			nbytes =
+			   scnprintf(buff + cnt, sz - cnt,
+					   "src_addr:%pI6 src_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+			cnt += nbytes;
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.dst_addr);
+			mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+			nbytes =
+			   scnprintf(buff + cnt, sz - cnt,
+					   "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+					   addr + 0, mask + 0);
+			cnt += nbytes;
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+				mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+			}
+			nbytes =
+			   scnprintf(buff + cnt, sz - cnt,
+					   "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+			cnt += nbytes;
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		nbytes =
+		   scnprintf(buff + cnt, sz - cnt, "src_port_range:%u %u ",
+				   attrib->src_port_lo,
+			     attrib->src_port_hi);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		nbytes =
+		   scnprintf(buff + cnt, sz - cnt, "dst_port_range:%u %u ",
+				   attrib->dst_port_lo,
+			     attrib->dst_port_hi);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "type:%d ",
+				attrib->type);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "code:%d ",
+				attrib->code);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "spi:%x ",
+				attrib->spi);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "src_port:%u ",
+				attrib->src_port);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "dst_port:%u ",
+				attrib->dst_port);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "tc:%d ",
+				attrib->u.v6.tc);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "flow_label:%x ",
+				attrib->u.v6.flow_label);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "next_hdr:%d ",
+				attrib->u.v6.next_hdr);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		nbytes =
+		   scnprintf(buff + cnt, sz - cnt,
+				   "metadata:%x metadata_mask:%x",
+				   attrib->meta_data, attrib->meta_data_mask);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "frg ");
+		cnt += nbytes;
+	}
+	nbytes = scnprintf(buff + cnt, sz - cnt, "\n");
+	cnt += nbytes;
+
+	return cnt;
+}
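+
+/*
+ * Illustrative only (not emitted verbatim anywhere in this patch): for an
+ * IPv4 rule whose attrib_mask has IPA_FLT_PROTOCOL and IPA_FLT_DST_PORT set,
+ * the helper above would append something like
+ *
+ *	protocol:6 dst_port:80
+ *
+ * followed by a newline, after the per-rule prefix written by its caller.
+ */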
+
+static int ipa_open_dbg(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t ipa_read_rt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int cnt = 0;
+	int i = 0;
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	struct ipa_rt_tbl_set *set;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	u32 hdr_ofst;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+			if (entry->hdr)
+				hdr_ofst = entry->hdr->offset_entry->offset;
+			else
+				hdr_ofst = 0;
+			nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt,
+					"tbl_idx:%d tbl_name:%s tbl_ref:%u rule_idx:%d dst:%d ep:%d S:%u hdr_ofst[words]:%u attrib_mask:%08x ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt, i, entry->rule.dst,
+					ipa_get_ep_mapping(ipa_ctx->mode,
+						entry->rule.dst),
+					   !ipa_ctx->hdr_tbl_lcl,
+					   hdr_ofst >> 2,
+					   entry->rule.attrib.attrib_mask);
+			cnt += nbytes;
+			cnt += ipa_attrib_dump(dbg_buff + cnt,
+					   IPA_MAX_MSG_LEN - cnt,
+					   &entry->rule.attrib,
+					   ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int cnt = 0;
+	int i;
+	int j;
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	struct ipa_rt_tbl *rt_tbl;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	mutex_lock(&ipa_ctx->lock);
+	i = 0;
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+		rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				   "ep_idx:global rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+				   i, entry->rule.action, rt_tbl->idx,
+				   entry->rule.attrib.attrib_mask);
+		cnt += nbytes;
+		cnt += ipa_attrib_dump(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				&entry->rule.attrib, ip);
+		i++;
+	}
+
+	for (j = 0; j < IPA_NUM_PIPES; j++) {
+		tbl = &ipa_ctx->flt_tbl[j][ip];
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+			nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt,
+					"ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+					j, i, entry->rule.action, rt_tbl->idx,
+					entry->rule.attrib.attrib_mask);
+			cnt += nbytes;
+			cnt +=
+			   ipa_attrib_dump(dbg_buff + cnt,
+					   IPA_MAX_MSG_LEN - cnt,
+					   &entry->rule.attrib,
+					   ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+const struct file_operations ipa_gen_reg_ops = {
+	.read = ipa_read_gen_reg,
+};
+
+const struct file_operations ipa_ep_reg_ops = {
+	.read = ipa_read_ep_reg,
+	.write = ipa_write_ep_reg,
+};
+
+const struct file_operations ipa_hdr_ops = {
+	.read = ipa_read_hdr,
+};
+
+const struct file_operations ipa_rt_ops = {
+	.read = ipa_read_rt,
+	.open = ipa_open_dbg,
+};
+
+const struct file_operations ipa_flt_ops = {
+	.read = ipa_read_flt,
+	.open = ipa_open_dbg,
+};
+
+void ipa_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP | S_IWOTH;
+
+	dent = debugfs_create_dir("ipa", 0);
+	if (IS_ERR(dent)) {
+		IPAERR("fail to create folder in debug_fs.\n");
+		return;
+	}
+
+	dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+			&ipa_gen_reg_ops);
+	if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+		IPAERR("fail to create file for debug_fs gen_reg\n");
+		goto fail;
+	}
+
+	dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+			&ipa_ep_reg_ops);
+	if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+		IPAERR("fail to create file for debug_fs ep_reg\n");
+		goto fail;
+	}
+
+	dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+			&ipa_hdr_ops);
+	if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+		IPAERR("fail to create file for debug_fs hdr\n");
+		goto fail;
+	}
+
+	dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa_rt_ops);
+	if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+		IPAERR("fail to create file for debug_fs ip4 rt\n");
+		goto fail;
+	}
+
+	dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa_rt_ops);
+	if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+		IPAERR("fail to create file for debug_fs ip6 rt\n");
+		goto fail;
+	}
+
+	dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa_flt_ops);
+	if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+		IPAERR("fail to create file for debug_fs ip4 flt\n");
+		goto fail;
+	}
+
+	dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa_flt_ops);
+	if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+		IPAERR("fail to create file for debug_fs ip6 flt\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+void ipa_debugfs_remove(void)
+{
+	if (IS_ERR(dent)) {
+		IPAERR("ipa_debugfs_remove: folder was not created.\n");
+		return;
+	}
+	debugfs_remove_recursive(dent);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa_debugfs_init(void) {}
+void ipa_debugfs_remove(void) {}
+#endif
+
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
new file mode 100644
index 0000000..c677a6e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -0,0 +1,1038 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define list_next_entry(pos, member) \
+	list_entry(pos->member.next, typeof(*pos), member)
+/**
+ * ipa_write_done() - this function will be (eventually) called when a Tx
+ * operation is complete
+ * @work:	work_struct used by the work queue
+ */
+void ipa_write_done(struct work_struct *work)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+	struct ipa_tx_pkt_wrapper *next_pkt;
+	struct ipa_tx_pkt_wrapper *tx_pkt_expected;
+	unsigned long irq_flags;
+	struct ipa_mem_buffer mult = { 0 };
+	int i;
+	u16 cnt;
+
+	tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
+	cnt = tx_pkt->cnt;
+	IPADBG("cnt=%d\n", cnt);
+
+	if (unlikely(cnt == 0))
+		WARN_ON(1);
+
+	if (cnt > 1 && cnt != 0xFFFF)
+		mult = tx_pkt->mult;
+
+	for (i = 0; i < cnt; i++) {
+		if (unlikely(tx_pkt == NULL))
+			WARN_ON(1);
+		spin_lock_irqsave(&tx_pkt->sys->spinlock, irq_flags);
+		tx_pkt_expected = list_first_entry(&tx_pkt->sys->head_desc_list,
+						   struct ipa_tx_pkt_wrapper,
+						   link);
+		if (unlikely(tx_pkt != tx_pkt_expected)) {
+			spin_unlock_irqrestore(&tx_pkt->sys->spinlock,
+					irq_flags);
+			WARN_ON(1);
+		}
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		tx_pkt->sys->len--;
+		spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
+		dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+				tx_pkt->mem.phys_base);
+		if (tx_pkt->callback)
+			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
+
+		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+
+	if (mult.phys_base)
+		dma_free_coherent(NULL, mult.size, mult.base, mult.phys_base);
+}
+
+/**
+ * ipa_send_one() - Send a single descriptor
+ * @sys:	system pipe context
+ * @desc:	descriptor to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+	unsigned long irq_flags;
+	int result;
+	u16 sps_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
+	dma_addr_t dma_address;
+	u16 len;
+
+	tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
+	if (!tx_pkt) {
+		IPAERR("failed to alloc tx wrapper\n");
+		goto fail_mem_alloc;
+	}
+
+	WARN_ON(desc->len > 512);
+
+	/*
+	 * Due to a HW limitation, we need to make sure that the packet does not
+	 * cross a 1KB boundary
+	 */
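+	/*
+	 * Worked example (illustrative): with a 512 byte payload, a bounce
+	 * buffer starting at offset 0x200 ends at 0x3FF and stays inside a
+	 * single 1KB region, while one starting at 0x300 would end at 0x4FF
+	 * and cross the 0x400 boundary; the dedicated pool below is what
+	 * rules out the latter case.
+	 */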
+	tx_pkt->bounce = dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool,
+			GFP_KERNEL, &dma_address);
+	if (!tx_pkt->bounce) {
+		dma_address = 0;
+	} else {
+		WARN_ON(!ipa_straddle_boundary
+		       ((u32)dma_address, (u32)dma_address + desc->len - 1,
+			1024));
+		memcpy(tx_pkt->bounce, desc->pyld, desc->len);
+	}
+
+	if (!dma_address) {
+		IPAERR("failed to DMA wrap\n");
+		goto fail_dma_map;
+	}
+
+	INIT_LIST_HEAD(&tx_pkt->link);
+	INIT_WORK(&tx_pkt->work, ipa_write_done);
+	tx_pkt->type = desc->type;
+	tx_pkt->cnt = 1;    /* only 1 desc in this "set" */
+
+	tx_pkt->mem.phys_base = dma_address;
+	tx_pkt->mem.base = desc->pyld;
+	tx_pkt->mem.size = desc->len;
+	tx_pkt->sys = sys;
+	tx_pkt->callback = desc->callback;
+	tx_pkt->user1 = desc->user1;
+	tx_pkt->user2 = desc->user2;
+
+	/*
+	 * Special treatment for immediate commands, where the structure of the
+	 * descriptor is different
+	 */
+	if (desc->type == IPA_IMM_CMD_DESC) {
+		sps_flags |= SPS_IOVEC_FLAG_IMME;
+		len = desc->opcode;
+	} else {
+		len = desc->len;
+	}
+
+	if (desc->type == IPA_IMM_CMD_DESC) {
+		IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+				desc->opcode, desc->len, sps_flags);
+		IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+	}
+
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+	sys->len++;
+	result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
+			sps_flags);
+	if (result) {
+		IPAERR("sps_transfer_one failed rc=%d\n", result);
+		goto fail_sps_send;
+	}
+
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+	return 0;
+
+fail_sps_send:
+	list_del(&tx_pkt->link);
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+	dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+			dma_address);
+fail_dma_map:
+	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+	return -EFAULT;
+}
+
+/**
+ * ipa_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+	struct ipa_tx_pkt_wrapper *next_pkt;
+	struct sps_transfer transfer = { 0 };
+	struct sps_iovec *iovec;
+	unsigned long irq_flags;
+	dma_addr_t dma_addr;
+	int i;
+	int j;
+	int result;
+	int fail_dma_wrap;
+	uint size = num_desc * sizeof(struct sps_iovec);
+
+	for (i = 0; i < num_desc; i++) {
+		fail_dma_wrap = 0;
+		tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
+					   GFP_KERNEL);
+		if (!tx_pkt) {
+			IPAERR("failed to alloc tx wrapper\n");
+			goto failure;
+		}
+		/*
+		 * first desc of set is "special" as it holds the count and
+		 * other info
+		 */
+		if (i == 0) {
+			transfer.user = tx_pkt;
+			transfer.iovec =
+				dma_alloc_coherent(NULL, size, &dma_addr, 0);
+			transfer.iovec_phys = dma_addr;
+			transfer.iovec_count = num_desc;
+			if (!transfer.iovec) {
+				IPAERR("fail alloc DMA mem for sps xfr buff\n");
+				goto failure;
+			}
+
+			tx_pkt->mult.phys_base = dma_addr;
+			tx_pkt->mult.base = transfer.iovec;
+			tx_pkt->mult.size = size;
+			tx_pkt->cnt = num_desc;
+		}
+
+		iovec = &transfer.iovec[i];
+		iovec->flags = 0;
+
+		INIT_LIST_HEAD(&tx_pkt->link);
+		INIT_WORK(&tx_pkt->work, ipa_write_done);
+		tx_pkt->type = desc[i].type;
+
+		tx_pkt->mem.base = desc[i].pyld;
+		tx_pkt->mem.size = desc[i].len;
+
+		WARN_ON(tx_pkt->mem.size > 512);
+
+		/*
+		 * Due to a HW limitation, we need to make sure that the
+		 * packet does not cross a 1KB boundary
+		 */
+		tx_pkt->bounce =
+		   dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool, GFP_KERNEL,
+				   &tx_pkt->mem.phys_base);
+		if (!tx_pkt->bounce) {
+			tx_pkt->mem.phys_base = 0;
+		} else {
+			WARN_ON(!ipa_straddle_boundary(
+						(u32)tx_pkt->mem.phys_base,
+						(u32)tx_pkt->mem.phys_base +
+						tx_pkt->mem.size - 1, 1024));
+			memcpy(tx_pkt->bounce, tx_pkt->mem.base,
+					tx_pkt->mem.size);
+		}
+
+		if (!tx_pkt->mem.phys_base) {
+			IPAERR("failed to alloc tx wrapper\n");
+			fail_dma_wrap = 1;
+			goto failure;
+		}
+
+		tx_pkt->sys = sys;
+		tx_pkt->callback = desc[i].callback;
+		tx_pkt->user1 = desc[i].user1;
+		tx_pkt->user2 = desc[i].user2;
+
+		iovec->addr = tx_pkt->mem.phys_base;
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+		sys->len++;
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		/*
+		 * Special treatment for immediate commands, where the structure
+		 * of the descriptor is different
+		 */
+		if (desc[i].type == IPA_IMM_CMD_DESC) {
+			iovec->size = desc[i].opcode;
+			iovec->flags |= SPS_IOVEC_FLAG_IMME;
+		} else {
+			iovec->size = desc[i].len;
+		}
+
+		if (i == (num_desc - 1)) {
+			iovec->flags |= (SPS_IOVEC_FLAG_EOT |
+					SPS_IOVEC_FLAG_INT);
+			/* "mark" the last desc */
+			tx_pkt->cnt = 0xFFFF;
+		}
+	}
+
+	result = sps_transfer(sys->ep->ep_hdl, &transfer);
+	if (result) {
+		IPAERR("sps_transfer failed rc=%d\n", result);
+		goto failure;
+	}
+
+	return 0;
+
+failure:
+	tx_pkt = transfer.user;
+	for (j = 0; j < i; j++) {
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+		dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+				tx_pkt->mem.phys_base);
+		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+	if (i < num_desc)
+		/* last desc failed */
+		if (fail_dma_wrap)
+			kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+	if (transfer.iovec_phys)
+		dma_free_coherent(NULL, size, transfer.iovec,
+				  transfer.iovec_phys);
+
+	return -EFAULT;
+}
+
+/**
+ * ipa_cmd_ack() - callback function which will be called by SPS driver after
+ * an immediate command is complete.
+ * @user1:	pointer to the descriptor of the transfer
+ * @user2:	not used
+ *
+ * Complete the immediate command's completion object; this will release the
+ * thread which waits on this completion object (ipa_send_cmd())
+ */
+static void ipa_cmd_ack(void *user1, void *user2)
+{
+	struct ipa_desc *desc = (struct ipa_desc *)user1;
+
+	if (!desc)
+		WARN_ON(1);
+	IPADBG("got ack for cmd=%d\n", desc->opcode);
+	complete(&desc->xfer_done);
+}
+
+/**
+ * ipa_send_cmd - send immediate commands
+ * @num_desc:	number of descriptors within the descr struct
+ * @descr:	descriptor structure
+ *
+ * Function will block until the command gets an ACK from IPA HW; the caller
+ * needs to free any resources it allocated after the function returns
+ */
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
+{
+	struct ipa_desc *desc;
+
+	if (num_desc == 1) {
+		init_completion(&descr->xfer_done);
+
+		/* client should not set these */
+		if (descr->callback || descr->user1)
+			WARN_ON(1);
+
+		descr->callback = ipa_cmd_ack;
+		descr->user1 = descr;
+		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_CMD], descr)) {
+			IPAERR("fail to send immediate command\n");
+			return -EFAULT;
+		}
+		wait_for_completion(&descr->xfer_done);
+	} else {
+		desc = &descr[num_desc - 1];
+		init_completion(&desc->xfer_done);
+
+		/* client should not set these */
+		if (desc->callback || desc->user1)
+			WARN_ON(1);
+
+		desc->callback = ipa_cmd_ack;
+		desc->user1 = desc;
+		if (ipa_send(&ipa_ctx->sys[IPA_A5_CMD], num_desc, descr)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			return -EFAULT;
+		}
+		wait_for_completion(&desc->xfer_done);
+	}
+
+	return 0;
+}
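+
+/*
+ * Minimal usage sketch for ipa_send_cmd(), mirroring how the filter commit
+ * path in ipa_flt.c drives it; shown for illustration only and not compiled
+ * here ("cmd" stands for an already allocated immediate command payload):
+ *
+ *	struct ipa_desc desc = { 0 };
+ *
+ *	desc.opcode = IPA_IP_V4_FILTER_INIT;
+ *	desc.pyld = cmd;
+ *	desc.len = sizeof(struct ipa_ip_v4_filter_init);
+ *	desc.type = IPA_IMM_CMD_DESC;
+ *	if (ipa_send_cmd(1, &desc))
+ *		return -EPERM;
+ */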
+
+/**
+ * ipa_tx_notify() - Callback function which will be called by the SPS driver
+ * after a Tx operation is complete. Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ */
+static void ipa_tx_notify(struct sps_event_notify *notify)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		tx_pkt = notify->data.transfer.user;
+		queue_work(ipa_ctx->tx_wq, &tx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ *  - Disconnect the packet from the system pipe linked list
+ *  - Unmap the packet's skb so it is no longer DMA-able
+ *  - Free the packet wrapper back to its cache
+ *  - Prepare a proper skb
+ *  - Call the endpoint's notify function, passing the skb as a parameter
+ *  - Replenish the rx cache
+ */
+void ipa_handle_rx_core(void)
+{
+	struct ipa_a5_mux_hdr *mux_hdr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct sk_buff *rx_skb;
+	struct sps_iovec iov;
+	unsigned long irq_flags;
+	u16 pull_len;
+	u16 padding;
+	int ret;
+	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+	struct ipa_ep_context *ep;
+
+	do {
+		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+		if (ret) {
+			IPAERR("sps_get_iovec failed %d\n", ret);
+			break;
+		}
+
+		/* Break the loop when there are no more packets to receive */
+		if (iov.addr == 0)
+			break;
+
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		if (list_empty(&sys->head_desc_list))
+			WARN_ON(1);
+		rx_pkt = list_first_entry(&sys->head_desc_list,
+					  struct ipa_rx_pkt_wrapper, link);
+		if (!rx_pkt)
+			WARN_ON(1);
+		rx_pkt->len = iov.size;
+		sys->len--;
+		list_del(&rx_pkt->link);
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		IPADBG("--curr_cnt=%d\n", sys->len);
+
+		rx_skb = rx_pkt->skb;
+		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_FROM_DEVICE);
+
+		/*
+		 * make it look like a real skb, "data" was already set at
+		 * alloc time
+		 */
+		rx_skb->tail = rx_skb->data + rx_pkt->len;
+		rx_skb->len = rx_pkt->len;
+		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
+
+		/* the wrapper is no longer needed once the skb is set up */
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+		mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
+
+		IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
+		       rx_skb->len, ntohs(mux_hdr->interface_id),
+		       mux_hdr->src_pipe_index,
+		       mux_hdr->flags, ntohl(mux_hdr->metadata));
+
+		IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
+
+		if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
+		    !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
+		    !ipa_ctx->ep[mux_hdr->src_pipe_index].notify) {
+			IPAERR("drop pipe=%d ep_valid=%d notify=%p\n",
+			       mux_hdr->src_pipe_index,
+			       ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
+			       ipa_ctx->ep[mux_hdr->src_pipe_index].notify);
+			dev_kfree_skb_any(rx_skb);
+			ipa_replenish_rx_cache();
+			continue;
+		}
+
+		ep = &ipa_ctx->ep[mux_hdr->src_pipe_index];
+		pull_len = sizeof(struct ipa_a5_mux_hdr);
+
+		/*
+		 * IP packet starts on word boundary
+		 * remove the MUX header and any padding and pass the frame to
+		 * the client which registered a rx callback on the "src pipe"
+		 */
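+		/*
+		 * e.g. if the endpoint's configured hdr_len is 14, padding is
+		 * 2, so 2 extra bytes (4 - padding) are pulled beyond the MUX
+		 * header and the IP packet is realigned to a word boundary.
+		 */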
+		padding = ep->cfg.hdr.hdr_len & 0x3;
+		if (padding)
+			pull_len += 4 - padding;
+
+		IPADBG("pulling %d bytes from skb\n", pull_len);
+		skb_pull(rx_skb, pull_len);
+		ep->notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+		ipa_replenish_rx_cache();
+	} while (1);
+}
+
+/**
+ * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static void ipa_rx_switch_to_intr_mode(void)
+{
+	int ret;
+	struct ipa_sys_context *sys;
+
+	IPADBG("Enter");
+	if (!ipa_ctx->curr_polling_state) {
+		IPAERR("already in intr mode\n");
+		return;
+	}
+
+	sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		return;
+	}
+	sys->event.options = SPS_O_EOT;
+	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+	if (ret) {
+		IPAERR("sps_register_event() failed %d\n", ret);
+		return;
+	}
+	sys->ep->connect.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		return;
+	}
+	ipa_handle_rx_core();
+	ipa_ctx->curr_polling_state = 0;
+}
+
+/**
+ * ipa_rx_switch_to_poll_mode() - Operate the Rx data path in polling mode
+ */
+static void ipa_rx_switch_to_poll_mode(void)
+{
+	int ret;
+	struct ipa_ep_context *ep;
+
+	IPADBG("Enter");
+	ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
+
+	ret = sps_get_config(ep->ep_hdl, &ep->connect);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		return;
+	}
+	ep->connect.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	ret = sps_set_config(ep->ep_hdl, &ep->connect);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		return;
+	}
+	ipa_ctx->curr_polling_state = 1;
+}
+
+/**
+ * ipa_rx_notify() - Callback function which is called by the SPS driver when
+ * a packet is received
+ * @notify:	SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deferred using a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This prevents the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+
+	IPADBG("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		if (!ipa_ctx->curr_polling_state) {
+			ipa_rx_switch_to_poll_mode();
+			rx_pkt = notify->data.transfer.user;
+			queue_work(ipa_ctx->rx_wq, &rx_pkt->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in:	[in] input needed to setup BAM pipe and config EP
+ * @clnt_hdl:	[out] client handle
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+	int ipa_ep_idx;
+	int sys_idx = -1;
+	int result = -EFAULT;
+	dma_addr_t dma_addr;
+
+	if (sys_in == NULL || clnt_hdl == NULL ||
+	    sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+		IPAERR("bad parm.\n");
+		result = -EINVAL;
+		goto fail_bad_param;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		goto fail_bad_param;
+	}
+
+	if (ipa_ctx->ep[ipa_ep_idx].valid == 1) {
+		IPAERR("EP already allocated.\n");
+		goto fail_bad_param;
+	}
+
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+	ipa_ctx->ep[ipa_ep_idx].valid = 1;
+	ipa_ctx->ep[ipa_ep_idx].client = sys_in->client;
+
+	if (ipa_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+		IPAERR("fail to configure EP.\n");
+		goto fail_sps_api;
+	}
+
+	/* Default Config */
+	ipa_ctx->ep[ipa_ep_idx].ep_hdl = sps_alloc_endpoint();
+
+	if (ipa_ctx->ep[ipa_ep_idx].ep_hdl == NULL) {
+		IPAERR("SPS EP allocation failed.\n");
+		goto fail_sps_api;
+	}
+
+	result = sps_get_config(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+			&ipa_ctx->ep[ipa_ep_idx].connect);
+	if (result) {
+		IPAERR("fail to get config.\n");
+		goto fail_mem_alloc;
+	}
+
+	/* Specific Config */
+	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_SRC;
+		ipa_ctx->ep[ipa_ep_idx].connect.destination =
+			SPS_DEV_HANDLE_MEM;
+		ipa_ctx->ep[ipa_ep_idx].connect.source = ipa_ctx->bam_handle;
+		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index =
+			ipa_ctx->a5_pipe_index++;
+		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index = ipa_ep_idx;
+		ipa_ctx->ep[ipa_ep_idx].connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+		if (ipa_ctx->polling_mode)
+			ipa_ctx->ep[ipa_ep_idx].connect.options |= SPS_O_POLL;
+	} else {
+		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_DEST;
+		ipa_ctx->ep[ipa_ep_idx].connect.source = SPS_DEV_HANDLE_MEM;
+		ipa_ctx->ep[ipa_ep_idx].connect.destination =
+			ipa_ctx->bam_handle;
+		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index =
+			ipa_ctx->a5_pipe_index++;
+		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index = ipa_ep_idx;
+		ipa_ctx->ep[ipa_ep_idx].connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT;
+		if (ipa_ctx->polling_mode)
+			ipa_ctx->ep[ipa_ep_idx].connect.options |=
+				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	}
+
+	ipa_ctx->ep[ipa_ep_idx].connect.desc.size = sys_in->desc_fifo_sz;
+	ipa_ctx->ep[ipa_ep_idx].connect.desc.base =
+	   dma_alloc_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+			   &dma_addr, 0);
+	ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base = dma_addr;
+	if (ipa_ctx->ep[ipa_ep_idx].connect.desc.base == NULL) {
+		IPAERR("fail to get DMA desc memory.\n");
+		goto fail_mem_alloc;
+	}
+
+	ipa_ctx->ep[ipa_ep_idx].connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+	result = sps_connect(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+			&ipa_ctx->ep[ipa_ep_idx].connect);
+	if (result) {
+		IPAERR("sps_connect fails.\n");
+		goto fail_sps_connect;
+	}
+
+	switch (ipa_ep_idx) {
+	case 1:
+		/* fall through */
+	case 2:
+		/* fall through */
+	case 3:
+		sys_idx = ipa_ep_idx;
+		break;
+	case 15:
+		sys_idx = IPA_A5_WLAN_AMPDU_OUT;
+		break;
+	default:
+		IPAERR("Invalid EP index.\n");
+		result = -EFAULT;
+		goto fail_register_event;
+	}
+
+	if (!ipa_ctx->polling_mode) {
+		if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+			ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+			ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+			ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+			ipa_ctx->sys[sys_idx].event.callback = ipa_rx_notify;
+			ipa_ctx->sys[sys_idx].event.user =
+				&ipa_ctx->sys[sys_idx];
+			result =
+			   sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+					      &ipa_ctx->sys[sys_idx].event);
+			if (result < 0) {
+				IPAERR("rx register event error %d\n", result);
+				goto fail_register_event;
+			}
+		} else {
+			ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+			ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+			ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+			ipa_ctx->sys[sys_idx].event.callback = ipa_tx_notify;
+			ipa_ctx->sys[sys_idx].event.user =
+				&ipa_ctx->sys[sys_idx];
+			result =
+			   sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+					      &ipa_ctx->sys[sys_idx].event);
+			if (result < 0) {
+				IPAERR("tx register event error %d\n", result);
+				goto fail_register_event;
+			}
+		}
+	}
+
+	return 0;
+
+fail_register_event:
+	sps_disconnect(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_connect:
+	dma_free_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+			  ipa_ctx->ep[ipa_ep_idx].connect.desc.base,
+			  ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base);
+fail_mem_alloc:
+	sps_free_endpoint(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_api:
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail_bad_param:
+	return result;
+}
+EXPORT_SYMBOL(ipa_setup_sys_pipe);
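+
+/*
+ * Minimal usage sketch for ipa_setup_sys_pipe()/ipa_teardown_sys_pipe(),
+ * illustrative only; the client and FIFO size below are example values, not
+ * values mandated by this driver:
+ *
+ *	struct ipa_sys_connect_params sys_in = { 0 };
+ *	u32 clnt_hdl;
+ *
+ *	sys_in.client = IPA_CLIENT_A5_WLAN_AMPDU_PROD;
+ *	sys_in.desc_fifo_sz = 0x800;
+ *	sys_in.ipa_ep_cfg = ...;	(EP configuration filled by the caller)
+ *	if (ipa_setup_sys_pipe(&sys_in, &clnt_hdl))
+ *		return -EFAULT;
+ *	...
+ *	ipa_teardown_sys_pipe(clnt_hdl);
+ */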
+
+/**
+ * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	sps_disconnect(ipa_ctx->ep[clnt_hdl].ep_hdl);
+	dma_free_coherent(NULL, ipa_ctx->ep[clnt_hdl].connect.desc.size,
+			  ipa_ctx->ep[clnt_hdl].connect.desc.base,
+			  ipa_ctx->ep[clnt_hdl].connect.desc.phys_base);
+	sps_free_endpoint(ipa_ctx->ep[clnt_hdl].ep_hdl);
+	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_teardown_sys_pipe);
+
+/**
+ * ipa_tx_comp() - Callback function which will call the user supplied
+ * callback function to release the skb, or release it on its own if no
+ * callback function was supplied
+ * @user1:	the skb that was transmitted
+ * @user2:	index of the EP the skb was sent on
+ */
+static void ipa_tx_comp(void *user1, void *user2)
+{
+	struct sk_buff *skb = (struct sk_buff *)user1;
+	u32 ep_idx = (u32)user2;
+
+	IPADBG("skb=%p ep=%d\n", skb, ep_idx);
+
+	if (ipa_ctx->ep[ep_idx].notify)
+		ipa_ctx->ep[ep_idx].notify(ipa_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)skb);
+	else
+		dev_kfree_skb_any(skb);
+}
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst:	[in] which IPA destination to route tx packets to
+ * @skb:	[in] the packet to send
+ * @metadata:	[in] TX packet meta-data
+ *
+ * Data-path tx handler. It is used both for the SW data-path, which bypasses
+ * most IPA HW blocks, AND for the regular HW data-path for WLAN AMPDU traffic
+ * only. If dst is a "valid" CONS type, the SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, the HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. On error, the client needs to free the skb as needed. On
+ * success, the IPA driver will later invoke the client callback if one was
+ * supplied; that callback should free the skb. If no callback was supplied,
+ * the IPA driver will free the skb internally
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *meta)
+{
+	struct ipa_desc desc[2];
+	int ipa_ep_idx;
+	struct ipa_ip_packet_init *cmd;
+
+	memset(&desc, 0, 2 * sizeof(struct ipa_desc));
+
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, dst);
+	if (ipa_ep_idx == -1) {
+		IPAERR("dest EP does not exist.\n");
+		goto fail_gen;
+	}
+
+	if (ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+		IPAERR("dest EP not valid.\n");
+		goto fail_gen;
+	}
+
+	if (IPA_CLIENT_IS_CONS(dst)) {
+		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_KERNEL);
+		if (!cmd) {
+			IPAERR("failed to alloc immediate command object\n");
+			goto fail_mem_alloc;
+		}
+
+		cmd->destination_pipe_index = ipa_ep_idx;
+		if (meta && meta->mbim_stream_id_valid)
+			cmd->metadata = meta->mbim_stream_id;
+		desc[0].opcode = IPA_IP_PACKET_INIT;
+		desc[0].pyld = cmd;
+		desc[0].len = sizeof(struct ipa_ip_packet_init);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		desc[1].pyld = skb->data;
+		desc[1].len = skb->len;
+		desc[1].type = IPA_DATA_DESC_SKB;
+		desc[1].callback = ipa_tx_comp;
+		desc[1].user1 = skb;
+		desc[1].user2 = (void *)ipa_ep_idx;
+
+		if (ipa_send(&ipa_ctx->sys[IPA_A5_LAN_WAN_OUT], 2, desc)) {
+			IPAERR("fail to send immediate command\n");
+			goto fail_send;
+		}
+	} else if (dst == IPA_CLIENT_A5_WLAN_AMPDU_PROD) {
+		desc[0].pyld = skb->data;
+		desc[0].len = skb->len;
+		desc[0].type = IPA_DATA_DESC_SKB;
+		desc[0].callback = ipa_tx_comp;
+		desc[0].user1 = skb;
+		desc[0].user2 = (void *)ipa_ep_idx;
+
+		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_WLAN_AMPDU_OUT],
+					&desc[0])) {
+			IPAERR("fail to send skb\n");
+			goto fail_gen;
+		}
+	} else {
+		IPAERR("%d PROD is not supported.\n", dst);
+		goto fail_gen;
+	}
+
+	return 0;
+
+fail_send:
+	kfree(cmd);
+fail_mem_alloc:
+fail_gen:
+	return -EFAULT;
+}
+EXPORT_SYMBOL(ipa_tx_dp);
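+
+/*
+ * Minimal usage sketch for ipa_tx_dp(), illustrative only; the destination
+ * shown is the WLAN AMPDU producer referenced above, and on failure the
+ * caller still owns the skb (as noted in the kernel-doc):
+ *
+ *	if (ipa_tx_dp(IPA_CLIENT_A5_WLAN_AMPDU_PROD, skb, NULL))
+ *		dev_kfree_skb_any(skb);
+ */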
+
+/**
+ * ipa_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @work: work struct needed by the work queue
+ *
+ * ipa_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+void ipa_handle_rx(struct work_struct *work)
+{
+	ipa_handle_rx_core();
+	ipa_rx_switch_to_intr_mode();
+}
+
+/**
+ * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
+ *
+ * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
+ * are IPA_RX_POOL_CEIL buffers in the cache.
+ *   - Allocate a buffer in the cache
+ *   - Initialize the packet's link
+ *   - Initialize the packet's work struct
+ *   - Allocate the packet's socket buffer (skb)
+ *   - Fill the packet's skb with data
+ *   - Map the skb so it is DMA-able
+ *   - Add the packet to the system pipe linked list
+ *   - Initiate a SPS transfer so that the SPS driver will use this packet later.
+ */
+void ipa_replenish_rx_cache(void)
+{
+	void *ptr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached;
+	unsigned long irq_flags;
+	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	rx_len_cached = sys->len;
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+	/* true RX data path not exercised yet, so use 1/8 of the pool ceiling */
+	while (rx_len_cached < (IPA_RX_POOL_CEIL >> 3)) {
+		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+					   GFP_KERNEL);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc rx wrapper\n");
+			return;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa_handle_rx);
+
+		rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
+		if (rx_pkt->skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->skb, IPA_RX_SKB_SIZE);
+		rx_pkt->dma_address = dma_map_single(NULL, ptr,
+						     IPA_RX_SKB_SIZE,
+						     DMA_FROM_DEVICE);
+		if (rx_pkt->dma_address == 0 || rx_pkt->dma_address == ~0) {
+			IPAERR("dma_map_single failure %p for %p\n",
+			       (void *)rx_pkt->dma_address, ptr);
+			goto fail_dma_mapping;
+		}
+
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
+				       IPA_RX_SKB_SIZE, rx_pkt,
+				       SPS_IOVEC_FLAG_INT);
+
+		if (ret) {
+			IPAERR("sps_transfer_one failed %d\n", ret);
+			goto fail_sps_transfer;
+		}
+
+		IPADBG("++curr_cnt=%d\n", sys->len);
+	}
+
+	return;
+
+fail_sps_transfer:
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	list_del(&rx_pkt->link);
+	--sys->len;
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+	dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+			 DMA_FROM_DEVICE);
+fail_dma_mapping:
+	dev_kfree_skb_any(rx_pkt->skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+	return;
+}
+
+/**
+ * ipa_cleanup_rx() - release RX queue resources
+ *
+ */
+void ipa_cleanup_rx(void)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct ipa_rx_pkt_wrapper *r;
+	unsigned long irq_flags;
+	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	list_for_each_entry_safe(rx_pkt, r,
+				 &sys->head_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rx_pkt->skb);
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_flt.c b/drivers/platform/msm/ipa/ipa_flt.c
new file mode 100644
index 0000000..81f3a80
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_flt.c
@@ -0,0 +1,811 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+#define IPA_FLT_TABLE_WORD_SIZE			(4)
+#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT		(0x3)
+#define IPA_FLT_BIT_MASK			(0x1)
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND		(-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED		(-1)
+
+/**
+ * ipa_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer. buf == NULL means the caller wants to know the size
+ *		of the rule as seen by HW, so no valid buffer was passed and a
+ *		scratch buffer is used instead. With this scheme the rule is
+ *		generated twice: once to learn its size using the scratch
+ *		buffer, and a second time to write the rule into the caller
+ *		supplied buffer, which is of the required size
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip,
+		struct ipa_flt_entry *entry, u8 *buf)
+{
+	struct ipa_flt_rule_hw_hdr *hdr;
+	const struct ipa_flt_rule *rule =
+		(const struct ipa_flt_rule *)&entry->rule;
+	u16 en_rule = 0;
+	u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+	u8 *start;
+
+	memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+	if (buf == NULL)
+		buf = tmp;
+
+	start = buf;
+	hdr = (struct ipa_flt_rule_hw_hdr *)buf;
+	hdr->u.hdr.action = entry->rule.action;
+	hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx;
+	hdr->u.hdr.rsvd = 0;
+	buf += sizeof(struct ipa_flt_rule_hw_hdr);
+
+	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+		IPAERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+
+	IPADBG("en_rule %x\n", en_rule);
+
+	hdr->u.hdr.en_rule = en_rule;
+	ipa_write_32(hdr->u.word, (u8 *)hdr);
+
+	if (entry->hw_len == 0) {
+		entry->hw_len = buf - start;
+	} else if (entry->hw_len != (buf - start)) {
+		IPAERR("hw_len differs b/w passes passed=%x calc=%x\n",
+		       entry->hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table
+ * @ip: the ip address family type
+ * @hdr_sz: header size
+ *
+ * Returns:	size of the table on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	u32 total_sz = 0;
+	u32 rule_set_sz;
+	int i;
+
+	*hdr_sz = 0;
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	rule_set_sz = 0;
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+		if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+			IPAERR("failed to find HW FLT rule size\n");
+			return -EPERM;
+		}
+		IPADBG("glob ip %d len %d\n", ip, entry->hw_len);
+		rule_set_sz += entry->hw_len;
+	}
+
+	if (rule_set_sz) {
+		tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+		/* this rule-set uses a word in header block */
+		*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+		if (!tbl->in_sys) {
+			/* add the terminator */
+			total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE);
+			total_sz = (total_sz +
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+					~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+		}
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		rule_set_sz = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+				IPAERR("failed to find HW FLT rule size\n");
+				return -EPERM;
+			}
+			IPADBG("pipe %d len %d\n", i, entry->hw_len);
+			rule_set_sz += entry->hw_len;
+		}
+
+		if (rule_set_sz) {
+			tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+			/* this rule-set uses a word in header block */
+			*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+			if (!tbl->in_sys) {
+				/* add the terminator */
+				total_sz += (rule_set_sz +
+					    IPA_FLT_TABLE_WORD_SIZE);
+				total_sz = (total_sz +
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+					~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+			}
+		}
+	}
+
+	*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+	total_sz += *hdr_sz;
+	IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+	return total_sz;
+}
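+
+/*
+ * Alignment example (illustrative): a non-system rule-set whose rules sum to
+ * 13 bytes grows to 17 bytes with its terminator word, and the
+ * (x + 0x3) & ~0x3 rounding above pads it to 20 bytes, the next word
+ * multiple.
+ */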
+
+/**
+ * ipa_generate_flt_hw_tbl() - generates the filtering hardware table
+ * @ip:	[in] the ip address family type
+ * @mem:	[out] buffer to put the filtering table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	u32 hdr_top = 0;
+	int i;
+	u32 hdr_sz;
+	u32 offset;
+	u8 *hdr;
+	u8 *body;
+	u8 *base;
+	struct ipa_mem_buffer flt_tbl_mem;
+	u8 *ftbl_membody;
+
+	mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
+
+	if (mem->size == 0) {
+		IPAERR("flt tbl empty ip=%d\n", ip);
+		goto error;
+	}
+	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+			GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		goto error;
+	}
+
+	memset(mem->base, 0, mem->size);
+
+	/* build the flt tbl in the DMA buffer to submit to IPA HW */
+	base = hdr = (u8 *)mem->base;
+	body = base + hdr_sz;
+
+	/* write a dummy header to move cursor */
+	hdr = ipa_write_32(hdr_top, hdr);
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+
+	if (!list_empty(&tbl->head_flt_rule_list)) {
+		hdr_top |= IPA_FLT_BIT_MASK;
+		if (!tbl->in_sys) {
+			offset = body - base;
+			if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+				IPAERR("offset is not word multiple %d\n",
+						offset);
+				goto proc_err;
+			}
+
+			offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+			/* rule is at an offset from base */
+			offset |= IPA_FLT_BIT_MASK;
+			hdr = ipa_write_32(offset, hdr);
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+					link) {
+				if (ipa_generate_flt_hw_rule(ip, entry, body)) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto proc_err;
+				}
+				body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			body = ipa_write_32(0, body);
+			if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+				/* advance body to next word boundary */
+				body = body + (IPA_FLT_TABLE_WORD_SIZE -
+					((u32)body &
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+		} else {
+			WARN_ON(tbl->sz == 0);
+			/* allocate memory for the flt tbl */
+			flt_tbl_mem.size = tbl->sz;
+			flt_tbl_mem.base =
+			   dma_alloc_coherent(NULL, flt_tbl_mem.size,
+					   &flt_tbl_mem.phys_base, GFP_KERNEL);
+			if (!flt_tbl_mem.base) {
+				IPAERR("fail to alloc DMA buff of size %d\n",
+						flt_tbl_mem.size);
+				WARN_ON(1);
+				goto proc_err;
+			}
+
+			WARN_ON(flt_tbl_mem.phys_base &
+				IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+			ftbl_membody = flt_tbl_mem.base;
+			memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+			hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+					link) {
+				if (ipa_generate_flt_hw_rule(ip, entry,
+							ftbl_membody)) {
+					IPAERR("failed to gen HW FLT rule\n");
+					WARN_ON(1);
+				}
+				ftbl_membody += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			ftbl_membody = ipa_write_32(0, ftbl_membody);
+			if (tbl->curr_mem.phys_base) {
+				WARN_ON(tbl->prev_mem.phys_base);
+				tbl->prev_mem = tbl->curr_mem;
+			}
+			tbl->curr_mem = flt_tbl_mem;
+		}
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		if (!list_empty(&tbl->head_flt_rule_list)) {
+			/* pipe "i" is at bit "i+1" */
+			hdr_top |= (1 << (i + 1));
+			if (!tbl->in_sys) {
+				offset = body - base;
+				if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+					IPAERR("ofst is not word multiple %d\n",
+					       offset);
+					goto proc_err;
+				}
+				offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+				/* rule is at an offset from base */
+				offset |= IPA_FLT_BIT_MASK;
+				hdr = ipa_write_32(offset, hdr);
+
+				/* generate the rule-set */
+				list_for_each_entry(entry,
+						&tbl->head_flt_rule_list,
+						link) {
+					if (ipa_generate_flt_hw_rule(ip, entry,
+								body)) {
+						IPAERR("fail gen FLT rule\n");
+						goto proc_err;
+					}
+					body += entry->hw_len;
+				}
+
+				/* write the rule-set terminator */
+				body = ipa_write_32(0, body);
+				if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+					/* advance body to next word boundary */
+					body = body + (IPA_FLT_TABLE_WORD_SIZE -
+						((u32)body &
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+			} else {
+				WARN_ON(tbl->sz == 0);
+				/* allocate memory for the flt tbl */
+				flt_tbl_mem.size = tbl->sz;
+				flt_tbl_mem.base =
+				   dma_alloc_coherent(NULL, flt_tbl_mem.size,
+						   &flt_tbl_mem.phys_base,
+						   GFP_KERNEL);
+				if (!flt_tbl_mem.base) {
+					IPAERR("fail alloc DMA buff size %d\n",
+							flt_tbl_mem.size);
+					WARN_ON(1);
+					goto proc_err;
+				}
+
+				WARN_ON(flt_tbl_mem.phys_base &
+				IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+
+				ftbl_membody = flt_tbl_mem.base;
+				memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+				hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+				/* generate the rule-set */
+				list_for_each_entry(entry,
+						&tbl->head_flt_rule_list,
+						link) {
+					if (ipa_generate_flt_hw_rule(ip, entry,
+							ftbl_membody)) {
+						IPAERR("fail gen FLT rule\n");
+						WARN_ON(1);
+					}
+					ftbl_membody += entry->hw_len;
+				}
+
+				/* write the rule-set terminator */
+				ftbl_membody =
+					ipa_write_32(0, ftbl_membody);
+				if (tbl->curr_mem.phys_base) {
+					WARN_ON(tbl->prev_mem.phys_base);
+					tbl->prev_mem = tbl->curr_mem;
+				}
+				tbl->curr_mem = flt_tbl_mem;
+			}
+		}
+	}
+
+	/* now write the hdr_top */
+	ipa_write_32(hdr_top, base);
+
+	return 0;
+proc_err:
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+
+	return -EPERM;
+}
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa_flt_tbl *tbl;
+	int i;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	if (tbl->prev_mem.phys_base) {
+		IPADBG("reaping glob flt tbl (prev) ip=%d\n", ip);
+		dma_free_coherent(NULL, tbl->prev_mem.size, tbl->prev_mem.base,
+				tbl->prev_mem.phys_base);
+		memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+	}
+
+	if (list_empty(&tbl->head_flt_rule_list)) {
+		if (tbl->curr_mem.phys_base) {
+			IPADBG("reaping glob flt tbl (curr) ip=%d\n", ip);
+			dma_free_coherent(NULL, tbl->curr_mem.size,
+					tbl->curr_mem.base,
+					tbl->curr_mem.phys_base);
+			memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem));
+		}
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		if (tbl->prev_mem.phys_base) {
+			IPADBG("reaping flt tbl (prev) pipe=%d ip=%d\n", i, ip);
+			dma_free_coherent(NULL, tbl->prev_mem.size,
+					tbl->prev_mem.base,
+					tbl->prev_mem.phys_base);
+			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+		}
+
+		if (list_empty(&tbl->head_flt_rule_list)) {
+			if (tbl->curr_mem.phys_base) {
+				IPADBG("reaping flt tbl (curr) pipe=%d ip=%d\n",
+						i, ip);
+				dma_free_coherent(NULL, tbl->curr_mem.size,
+						tbl->curr_mem.base,
+						tbl->curr_mem.phys_base);
+				memset(&tbl->curr_mem, 0,
+						sizeof(tbl->curr_mem));
+			}
+		}
+	}
+}
+
+static int __ipa_commit_flt(enum ipa_ip_type ip)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	void *cmd;
+	struct ipa_ip_v4_filter_init *v4;
+	struct ipa_ip_v6_filter_init *v6;
+	u16 avail;
+	u16 size;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	if (ip == IPA_IP_v4) {
+		avail = IPA_RAM_V4_FLT_SIZE;
+		size = sizeof(struct ipa_ip_v4_filter_init);
+	} else {
+		avail = IPA_RAM_V6_FLT_SIZE;
+		size = sizeof(struct ipa_ip_v6_filter_init);
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_flt_hw_tbl(ip, mem)) {
+		IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (mem->size > avail) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (ip == IPA_IP_v4) {
+		v4 = (struct ipa_ip_v4_filter_init *)cmd;
+		desc.opcode = IPA_IP_V4_FILTER_INIT;
+		v4->ipv4_rules_addr = mem->phys_base;
+		v4->size_ipv4_rules = mem->size;
+		v4->ipv4_addr = IPA_RAM_V4_FLT_OFST;
+	} else {
+		v6 = (struct ipa_ip_v6_filter_init *)cmd;
+		desc.opcode = IPA_IP_V6_FILTER_INIT;
+		v6->ipv6_rules_addr = mem->phys_base;
+		v6->size_ipv6_rules = mem->size;
+		v6->ipv6_addr = IPA_RAM_V6_FLT_OFST;
+	}
+
+	desc.pyld = cmd;
+	desc.len = size;
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	__ipa_reap_sys_flt_tbls(ip);
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->phys_base)
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+
+	return -EPERM;
+}
+
+static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
+			      const struct ipa_flt_rule *rule, u8 add_rear,
+			      u32 *rule_hdl)
+{
+	struct ipa_flt_entry *entry;
+	struct ipa_tree_node *node;
+
+	if (!rule->rt_tbl_hdl) {
+		IPAERR("flt rule does not point to valid RT tbl\n");
+		goto error;
+	}
+
+	if (ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rule->rt_tbl_hdl) == NULL) {
+		IPAERR("RT tbl not found\n");
+		goto error;
+	}
+
+	if (((struct ipa_rt_tbl *)rule->rt_tbl_hdl)->cookie != IPA_COOKIE) {
+		IPAERR("flt rule cookie is invalid\n");
+		goto error;
+	}
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc FLT rule object\n");
+		goto mem_alloc_fail;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->rule = *rule;
+	entry->cookie = IPA_COOKIE;
+	entry->rt_tbl = (struct ipa_rt_tbl *)rule->rt_tbl_hdl;
+	entry->tbl = tbl;
+	if (add_rear)
+		list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+	else
+		list_add(&entry->link, &tbl->head_flt_rule_list);
+	tbl->rule_cnt++;
+	entry->rt_tbl->ref_cnt++;
+	*rule_hdl = (u32)entry;
+	IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+	node->hdl = *rule_hdl;
+	if (ipa_insert(&ipa_ctx->flt_rule_hdl_tree, node)) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+	}
+
+	return 0;
+
+mem_alloc_fail:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+
+	return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+	struct ipa_flt_entry *entry = (struct ipa_flt_entry *)rule_hdl;
+	struct ipa_tree_node *node;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad params\n");
+
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, rule_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+
+		return -EPERM;
+	}
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	entry->rt_tbl->ref_cnt--;
+	IPADBG("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt);
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
+		const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl)
+{
+	struct ipa_flt_tbl *tbl;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	IPADBG("add global flt rule ip=%d\n", ip);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+				 const struct ipa_flt_rule *rule, u8 add_rear,
+				 u32 *rule_hdl)
+{
+	struct ipa_flt_tbl *tbl;
+	int ipa_ep_idx;
+
+	if (ip >= IPA_IP_MAX || rule == NULL || rule_hdl == NULL ||
+			ep >= IPA_CLIENT_MAX) {
+		IPAERR("bad parms\n");
+
+		return -EINVAL;
+	}
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, ep);
+	if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND ||
+				ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+		IPAERR("bad parms\n");
+
+		return -EINVAL;
+	}
+	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
+	IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	int i;
+	int result;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (rules->global)
+			result = __ipa_add_global_flt_rule(rules->ip,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		else
+			result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		if (result) {
+			IPAERR("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (__ipa_commit_flt(rules->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule);
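+
+/*
+ * Minimal usage sketch for ipa_add_flt_rule(), illustrative only: assumes
+ * "rules" points at an ipa_ioc_add_flt_rule with room for one entry and that
+ * rt_hdl is a routing table handle obtained beforehand:
+ *
+ *	rules->commit = 1;
+ *	rules->ip = IPA_IP_v4;
+ *	rules->global = 1;
+ *	rules->num_rules = 1;
+ *	rules->rules[0].at_rear = 1;
+ *	rules->rules[0].rule.rt_tbl_hdl = rt_hdl;
+ *	if (ipa_add_flt_rule(rules))
+ *		return -EPERM;
+ *	(per-rule results are in rules->rules[0].status / flt_rule_hdl)
+ */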
+
+/**
+ * ipa_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ * @hdls:	[inout] set of handles of the rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del flt rule %i\n", i);
+			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (__ipa_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_del_flt_rule);
+
+/**
+ * ipa_commit_flt() - Commit the current SW filtering table of specified type to
+ * IPA HW
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_flt(enum ipa_ip_type ip)
+{
+	int result;
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (__ipa_commit_flt(ip)) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_commit_flt);
+
+/**
+ * ipa_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_flt(enum ipa_ip_type ip)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	struct ipa_flt_entry *next;
+	struct ipa_tree_node *node;
+	int i;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset flt ip=%d\n", ip);
+	list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) {
+		node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, (u32)entry);
+		if (node == NULL)
+			WARN_ON(1);
+		list_del(&entry->link);
+		entry->tbl->rule_cnt--;
+		entry->rt_tbl->ref_cnt--;
+		entry->cookie = 0;
+		kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+		/* remove the handle from the database */
+		rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+		kmem_cache_free(ipa_ctx->tree_node_cache, node);
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+				link) {
+			node = ipa_search(&ipa_ctx->flt_rule_hdl_tree,
+					(u32)entry);
+			if (node == NULL)
+				WARN_ON(1);
+			list_del(&entry->link);
+			entry->tbl->rule_cnt--;
+			entry->rt_tbl->ref_cnt--;
+			entry->cookie = 0;
+			kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+			/* remove the handle from the database */
+			rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+			kmem_cache_free(ipa_ctx->tree_node_cache, node);
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_reset_flt);
diff --git a/drivers/platform/msm/ipa/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_hdr.c
new file mode 100644
index 0000000..4b9a500
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hdr.c
@@ -0,0 +1,614 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 32, 64 };
+
+/**
+ * ipa_generate_hdr_hw_tbl() - generates the headers table
+ * @mem:	[out] buffer to put the header table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+	struct ipa_hdr_entry *entry;
+
+	mem->size = ipa_ctx->hdr_tbl.end;
+
+	if (mem->size == 0) {
+		IPAERR("hdr tbl empty\n");
+		return -EPERM;
+	}
+	IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end);
+
+	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+			GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	memset(mem->base, 0, mem->size);
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
+				entry->offset_entry->offset);
+		memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
+				entry->hdr_len);
+	}
+
+	return 0;
+}
+
+/*
+ * __ipa_commit_hdr() - commit the header table to IPA HW
+ * This function must be called with ipa_ctx->lock held.
+ */
+static int __ipa_commit_hdr(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	struct ipa_hdr_init_local *cmd;
+	u16 len;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	/* immediate command payload size is the same for local and system */
+	len = sizeof(struct ipa_hdr_init_local);
+
+	/*
+	 * we can use init_local ptr for init_system due to layout of the
+	 * struct
+	 */
+	cmd = kmalloc(len, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_hdr_hw_tbl(mem)) {
+		IPAERR("fail to generate HDR HW TBL\n");
+		goto fail_hw_tbl_gen;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl && mem->size > IPA_RAM_HDR_SIZE) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size,
+				IPA_RAM_HDR_SIZE);
+		goto fail_send_cmd;
+	}
+
+	cmd->hdr_table_addr = mem->phys_base;
+	if (ipa_ctx->hdr_tbl_lcl) {
+		cmd->size_hdr_table = mem->size;
+		cmd->hdr_addr = IPA_RAM_HDR_OFST;
+		desc.opcode = IPA_HDR_INIT_LOCAL;
+	} else {
+		desc.opcode = IPA_HDR_INIT_SYSTEM;
+	}
+	desc.pyld = cmd;
+	desc.len = sizeof(struct ipa_hdr_init_local);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl) {
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	} else {
+		if (ipa_ctx->hdr_mem.phys_base) {
+			dma_free_coherent(NULL, ipa_ctx->hdr_mem.size,
+					  ipa_ctx->hdr_mem.base,
+					  ipa_ctx->hdr_mem.phys_base);
+		}
+		ipa_ctx->hdr_mem = *mem;
+	}
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->phys_base)
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+
+	return -EPERM;
+}
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+	struct ipa_hdr_entry *entry;
+	struct ipa_hdr_offset_entry *offset;
+	struct ipa_tree_node *node;
+	u32 bin;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
+		IPAERR("bad parm\n");
+		goto error;
+	}
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc hdr object\n");
+		goto hdr_alloc_fail;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+	entry->hdr_len = hdr->hdr_len;
+	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+	entry->is_partial = hdr->is_partial;
+	entry->cookie = IPA_COOKIE;
+
+	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+		bin = IPA_HDR_BIN0;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+		bin = IPA_HDR_BIN1;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+		bin = IPA_HDR_BIN2;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+		bin = IPA_HDR_BIN3;
+	else {
+		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+		goto bad_hdr_len;
+	}
+
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache,
+					   GFP_KERNEL);
+		if (!offset) {
+			IPAERR("failed to alloc hdr offset object\n");
+			goto ofst_alloc_fail;
+		}
+		INIT_LIST_HEAD(&offset->link);
+		/*
+		 * for a first item grow, set the bin and offset which are set
+		 * in stone
+		 */
+		offset->offset = htbl->end;
+		offset->bin = bin;
+		htbl->end += ipa_hdr_bin_sz[bin];
+		list_add(&offset->link,
+				&htbl->head_offset_list[bin]);
+	} else {
+		/* get the first free slot */
+		offset =
+		    list_first_entry(&htbl->head_free_offset_list[bin],
+				     struct ipa_hdr_offset_entry, link);
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
+	}
+
+	entry->offset_entry = offset;
+	list_add(&entry->link, &htbl->head_hdr_entry_list);
+	htbl->hdr_cnt++;
+	IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", hdr->hdr_len,
+			htbl->hdr_cnt, offset->offset);
+
+	hdr->hdr_hdl = (u32) entry;
+	node->hdl = hdr->hdr_hdl;
+	if (ipa_insert(&ipa_ctx->hdr_hdl_tree, node)) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+	}
+
+	return 0;
+
+ofst_alloc_fail:
+	kmem_cache_free(ipa_ctx->hdr_offset_cache, offset);
+bad_hdr_len:
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_cache, entry);
+hdr_alloc_fail:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+	return -EPERM;
+}
+
+static int __ipa_del_hdr(u32 hdr_hdl)
+{
+	struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+	struct ipa_tree_node *node;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+	if (!entry || (entry->cookie != IPA_COOKIE) || (entry->ref_cnt != 0)) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
+			htbl->hdr_cnt, entry->offset_entry->offset);
+
+	/* move the offset entry to appropriate free list */
+	list_move(&entry->offset_entry->link,
+		  &htbl->head_free_offset_list[entry->offset_entry->bin]);
+	list_del(&entry->link);
+	htbl->hdr_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+/**
+ * ipa_add_hdr() - add the specified headers to SW and optionally commit them to
+ * IPA HW
+ * @hdrs:	[inout] set of headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdrs == NULL || hdrs->num_hdrs == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdrs->num_hdrs; i++) {
+		if (__ipa_add_hdr(&hdrs->hdr[i])) {
+			IPAERR("failed to add hdr %d\n", i);
+			hdrs->hdr[i].status = -1;
+		} else {
+			hdrs->hdr[i].status = 0;
+		}
+	}
+
+	if (hdrs->commit) {
+		if (__ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_add_hdr);
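+
+/*
+ * Illustrative sketch (editorial example, not part of the driver flow):
+ * adding one partial Ethernet-sized header and committing it to HW. The
+ * function name and the "example_hdr" name are hypothetical; the fields
+ * follow struct ipa_hdr_add as used above, and the header payload is a
+ * zeroed placeholder chosen only for the example.
+ */
+static int __maybe_unused ipa_example_add_hdr(void)
+{
+	struct ipa_ioc_add_hdr *req;
+	size_t sz = sizeof(*req) + sizeof(req->hdr[0]);
+	int ret;
+
+	req = kzalloc(sz, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->commit = 1;
+	req->num_hdrs = 1;
+	strlcpy(req->hdr[0].name, "example_hdr", IPA_RESOURCE_NAME_MAX);
+	req->hdr[0].hdr_len = 14;	/* Ethernet II header length */
+	req->hdr[0].is_partial = 1;	/* MAC addresses filled in later */
+	/* req->hdr[0].hdr[] left zeroed; a real caller copies the template */
+
+	ret = ipa_add_hdr(req);
+	if (!ret && req->hdr[0].status == 0)
+		IPADBG("hdr hdl=0x%x\n", req->hdr[0].hdr_hdl);
+
+	kfree(req);
+	return ret;
+}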
+
+/**
+ * ipa_del_hdr() - Remove the specified headers from SW and optionally commit them
+ * to IPA HW
+ * @hdls:	[inout] set of headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_hdr(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del hdr %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (__ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_del_hdr);
+
+/**
+ * ipa_dump_hdr() - print all the headers in the SW header table
+ *
+ * Note:	Should not be called from atomic context
+ */
+void ipa_dump_hdr(void)
+{
+	struct ipa_hdr_entry *entry;
+
+	IPADBG("START\n");
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		IPADBG("hdr_len=%4d off=%4d bin=%4d\n", entry->hdr_len,
+				entry->offset_entry->offset,
+				entry->offset_entry->bin);
+	}
+	mutex_unlock(&ipa_ctx->lock);
+	IPADBG("END\n");
+}
+
+/**
+ * ipa_commit_hdr() - commit the current SW header table to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_hdr(void)
+{
+	int result = -EFAULT;
+
+	/*
+	 * issue a commit on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa_commit_rt(IPA_IP_v4))
+		return -EPERM;
+	if (ipa_commit_rt(IPA_IP_v6))
+		return -EPERM;
+
+	mutex_lock(&ipa_ctx->lock);
+	if (__ipa_commit_hdr()) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_commit_hdr);
+
+/**
+ * ipa_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_hdr(void)
+{
+	struct ipa_hdr_entry *entry;
+	struct ipa_hdr_entry *next;
+	struct ipa_hdr_offset_entry *off_entry;
+	struct ipa_hdr_offset_entry *off_next;
+	struct ipa_tree_node *node;
+	int i;
+
+	/*
+	 * issue a reset on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa_reset_rt(IPA_IP_v4))
+		IPAERR("fail to reset v4 rt\n");
+	if (ipa_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v6 rt\n");
+
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset hdr\n");
+	list_for_each_entry_safe(entry, next,
+			&ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+		/* do not remove the default exception header */
+		if (!strncmp(entry->name, IPA_DFLT_HDR_NAME,
+					IPA_RESOURCE_NAME_MAX))
+			continue;
+
+		node = ipa_search(&ipa_ctx->hdr_hdl_tree, (u32) entry);
+		if (node == NULL)
+			WARN_ON(1);
+		list_del(&entry->link);
+		entry->cookie = 0;
+		kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+		/* remove the handle from the database */
+		rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+		kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	}
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		list_for_each_entry_safe(off_entry, off_next,
+					 &ipa_ctx->hdr_tbl.head_offset_list[i],
+					 link) {
+
+			/*
+			 * do not remove the default exception header which is
+			 * at offset 0
+			 */
+			if (off_entry->offset == 0)
+				continue;
+
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+		}
+		list_for_each_entry_safe(off_entry, off_next,
+				&ipa_ctx->hdr_tbl.head_free_offset_list[i],
+				link) {
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+		}
+	}
+	/* only the default exception header (8 bytes) remains */
+	ipa_ctx->hdr_tbl.end = 8;
+	ipa_ctx->hdr_tbl.hdr_cnt = 1;
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_reset_hdr);
+
+static struct ipa_hdr_entry *__ipa_find_hdr(const char *name)
+{
+	struct ipa_hdr_entry *entry;
+
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa_get_hdr() - Lookup the specified header resource
+ * @lookup:	[inout] header to lookup and its handle
+ *
+ * Look up the specified header resource and return its handle if it exists.
+ * If the lookup succeeds, the header entry reference count is incremented.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *		Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	struct ipa_hdr_entry *entry;
+	int result = -1;
+
+	if (lookup == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_find_hdr(lookup->name);
+	if (entry) {
+		entry->ref_cnt++;
+		lookup->hdl = (uint32_t) entry;
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_get_hdr);
+
+/**
+ * ipa_put_hdr() - Release the specified header handle
+ * @hdr_hdl:	[in] the header handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_put_hdr(u32 hdr_hdl)
+{
+	struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+	struct ipa_tree_node *node;
+	int result = -EFAULT;
+
+	if (entry == NULL || entry->cookie != IPA_COOKIE ||
+			entry->ref_cnt == 0) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry->ref_cnt--;
+	if (entry->ref_cnt == 0) {
+		if (__ipa_del_hdr(hdr_hdl)) {
+			IPAERR("fail to del hdr\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		/* commit for put */
+		if (__ipa_commit_hdr()) {
+			IPAERR("fail to commit hdr\n");
+			result = -EFAULT;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_put_hdr);
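+
+/*
+ * Illustrative sketch (editorial example, not part of the driver flow) of
+ * the get/put contract documented above: look up a header by name, which
+ * takes a reference, and release that reference when done. The function
+ * name and the "example_hdr" name are hypothetical. Note that, per the code
+ * above, releasing the last reference deletes the header and commits the
+ * table.
+ */
+static void __maybe_unused ipa_example_get_put_hdr(void)
+{
+	struct ipa_ioc_get_hdr lookup;
+
+	memset(&lookup, 0, sizeof(lookup));
+	strlcpy(lookup.name, "example_hdr", IPA_RESOURCE_NAME_MAX);
+
+	if (ipa_get_hdr(&lookup))
+		return;	/* no such header */
+
+	/* ... use lookup.hdl, e.g. as the hdr handle of a routing rule ... */
+
+	if (ipa_put_hdr(lookup.hdl))
+		IPAERR("fail to release hdr hdl 0x%x\n", lookup.hdl);
+}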
+
+/**
+ * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it
+ * @copy:	[inout] header to lookup and its copy
+ *
+ * Look up the specified header resource and return a copy of it (along with
+ * its attributes) if it exists. This is typically used for partial headers.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	struct ipa_hdr_entry *entry;
+	int result = -EFAULT;
+
+	if (copy == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_find_hdr(copy->name);
+	if (entry) {
+		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+		copy->hdr_len = entry->hdr_len;
+		copy->is_partial = entry->is_partial;
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_copy_hdr);
diff --git a/drivers/platform/msm/ipa/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_hw_defs.h
new file mode 100644
index 0000000..3131a84
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hw_defs.h
@@ -0,0 +1,258 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW related data types */
+
+/* immediate command op-codes */
+#define IPA_DECIPH_INIT        (1)
+#define IPA_PPP_FRM_INIT       (2)
+#define IPA_IP_V4_FILTER_INIT  (3)
+#define IPA_IP_V6_FILTER_INIT  (4)
+#define IPA_IP_V4_NAT_INIT     (5)
+#define IPA_IP_V6_NAT_INIT     (6)
+#define IPA_IP_V4_ROUTING_INIT (7)
+#define IPA_IP_V6_ROUTING_INIT (8)
+#define IPA_HDR_INIT_LOCAL     (9)
+#define IPA_HDR_INIT_SYSTEM   (10)
+#define IPA_DECIPH_SETUP      (11)
+#define IPA_INSERT_NAT_RULE   (12)
+#define IPA_DELETE_NAT_RULE   (13)
+#define IPA_NAT_DMA           (14)
+#define IPA_IP_PACKET_TAG     (15)
+#define IPA_IP_PACKET_INIT    (16)
+
+#define IPA_INTERFACE_ID_EXCEPTION         (0)
+#define IPA_INTERFACE_ID_A2_WWAN        (0x10)
+#define IPA_INTERFACE_ID_HSUSB_RMNET1   (0x21)
+#define IPA_INTERFACE_ID_HSUSB_RMNET2   (0x22)
+#define IPA_INTERFACE_ID_HSUSB_RMNET3   (0x23)
+#define IPA_INTERFACE_ID_HSIC_WLAN_WAN  (0x31)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN1 (0x32)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN2 (0x33)
+#define IPA_INTERFACE_ID_HSIC_RMNET1    (0x41)
+#define IPA_INTERFACE_ID_HSIC_RMNET2    (0x42)
+#define IPA_INTERFACE_ID_HSIC_RMNET3    (0x43)
+#define IPA_INTERFACE_ID_HSIC_RMNET4    (0x44)
+#define IPA_INTERFACE_ID_HSIC_RMNET5    (0x45)
+
+/**
+ * struct ipa_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post routing action
+ * @rt_tbl_idx: index in routing table
+ * @rsvd: reserved
+ */
+struct ipa_flt_rule_hw_hdr {
+	union {
+		u32 word;
+		struct {
+			u32 en_rule:16;
+			u32 action:5;
+			u32 rt_tbl_idx:5;
+			u32 rsvd:6;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule properties
+ * @en_rule: enable rule
+ * @pipe_dest_idx: destination pipe index
+ * @system: changed from local to system due to HW change
+ * @hdr_offset: header offset
+ */
+struct ipa_rt_rule_hw_hdr {
+	union {
+		u32 word;
+		struct {
+			u32 en_rule:16;
+			u32 pipe_dest_idx:5;
+			u32 system:1;
+			u32 hdr_offset:10;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_filter_init {
+	u64 ipv4_rules_addr:32;
+	u64 size_ipv4_rules:12;
+	u64 ipv4_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_filter_init {
+	u64 ipv6_rules_addr:32;
+	u64 size_ipv6_rules:16;
+	u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_routing_init {
+	u64 ipv4_rules_addr:32;
+	u64 size_ipv4_rules:12;
+	u64 ipv4_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_routing_init {
+	u64 ipv6_rules_addr:32;
+	u64 size_ipv6_rules:16;
+	u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_hdr_init_local - IPA_HDR_INIT_LOCAL command payload
+ * @hdr_table_addr: address of header table
+ * @size_hdr_table: size of the above
+ * @hdr_addr: header address
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_local {
+	u64 hdr_table_addr:32;
+	u64 size_hdr_table:12;
+	u64 hdr_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload
+ * @hdr_table_addr: address of header table
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_system {
+	u64 hdr_table_addr:32;
+	u64 rsvd:32;
+};
+
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP		BIT(0)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT		BIT(1)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT	BIT(2)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG		BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED	BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL		BIT(5)
+
+/**
+ * struct ipa_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags
+ * @metadata: metadata
+ *
+ * A5 MUX header is in BE, A5 runs in LE. This struct definition
+ * allows A5 SW to correctly parse the header
+ */
+struct ipa_a5_mux_hdr {
+	u16 interface_id;
+	u8 src_pipe_index;
+	u8 flags;
+	u32 metadata;
+};
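+
+/*
+ * Illustrative helpers (editorial sketch, not used by the driver): since the
+ * A5 MUX header fields arrive in big-endian order, an apps-side parser would
+ * byte-swap the multi-byte fields before use. The helper names are
+ * hypothetical and assume the standard byteorder helpers are available in
+ * the including translation unit.
+ */
+static inline u16 ipa_a5_mux_hdr_interface_id(const struct ipa_a5_mux_hdr *hdr)
+{
+	return be16_to_cpu(hdr->interface_id);
+}
+
+static inline u32 ipa_a5_mux_hdr_metadata(const struct ipa_a5_mux_hdr *hdr)
+{
+	return be32_to_cpu(hdr->metadata);
+}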
+
+/**
+ * struct ipa_nat_dma - IPA_NAT_DMA command payload
+ * @table_index: NAT table index
+ * @rsvd1: reserved
+ * @base_addr: base address
+ * @rsvd2: reserved
+ * @offset: offset
+ * @data: data to be written
+ * @rsvd3: reserved
+ */
+struct ipa_nat_dma {
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 base_addr:2;
+	u64 rsvd2:2;
+	u64 offset:32;
+	u64 data:16;
+	u64 rsvd3:8;
+};
+
+/**
+ * struct ipa_ip_packet_init - IPA_IP_PACKET_INIT command payload
+ * @destination_pipe_index: destination pipe index
+ * @rsvd1: reserved
+ * @metadata: metadata
+ * @rsvd2: reserved
+ */
+struct ipa_ip_packet_init {
+	u64 destination_pipe_index:5;
+	u64 rsvd1:3;
+	u64 metadata:32;
+	u64 rsvd2:24;
+};
+
+/**
+ * struct ipa_ip_v4_nat_init - IPA_IP_V4_NAT_INIT command payload
+ * @ipv4_rules_addr: ipv4 rules address
+ * @ipv4_expansion_rules_addr: ipv4 expansion rules address
+ * @index_table_addr: index tables address
+ * @index_table_expansion_addr: index expansion table address
+ * @table_index: NAT table index
+ * @rsvd1: reserved
+ * @ipv4_rules_addr_type: ipv4 address type
+ * @ipv4_expansion_rules_addr_type: ipv4 expansion address type
+ * @index_table_addr_type: index table address type
+ * @index_table_expansion_addr_type: index expansion table type
+ * @size_base_tables: size of base tables
+ * @size_expansion_tables: size of expansion tables
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_ip_v4_nat_init {
+	u64 ipv4_rules_addr:32;
+	u64 ipv4_expansion_rules_addr:32;
+	u64 index_table_addr:32;
+	u64 index_table_expansion_addr:32;
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 ipv4_rules_addr_type:1;
+	u64 ipv4_expansion_rules_addr_type:1;
+	u64 index_table_addr_type:1;
+	u64 index_table_expansion_addr_type:1;
+	u64 size_base_tables:12;
+	u64 size_expansion_tables:10;
+	u64 rsvd2:2;
+	u64 public_ip_addr:32;
+};
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
new file mode 100644
index 0000000..63ef5fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -0,0 +1,727 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_I_H_
+#define _IPA_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_hw_defs.h"
+#include "ipa_ram_mmap.h"
+#include "ipa_reg.h"
+
+#define DRV_NAME "ipa"
+#define IPA_COOKIE 0xfacefeed
+
+#define IPA_NUM_PIPES 0x14
+#define IPA_SYS_DESC_FIFO_SZ (0x800)
+
+#ifdef IPA_DEBUG
+#define IPADBG(fmt, args...) \
+	pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#else
+#define IPADBG(fmt, args...)
+#endif
+
+#define IPAERR(fmt, args...) \
+	pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define IPA_TOS_EQ			BIT(0)
+#define IPA_PROTOCOL_EQ		BIT(1)
+#define IPA_OFFSET_MEQ32_0		BIT(2)
+#define IPA_OFFSET_MEQ32_1		BIT(3)
+#define IPA_IHL_OFFSET_RANGE16_0	BIT(4)
+#define IPA_IHL_OFFSET_RANGE16_1	BIT(5)
+#define IPA_IHL_OFFSET_EQ_16		BIT(6)
+#define IPA_IHL_OFFSET_EQ_32		BIT(7)
+#define IPA_IHL_OFFSET_MEQ32_0		BIT(8)
+#define IPA_OFFSET_MEQ128_0		BIT(9)
+#define IPA_OFFSET_MEQ128_1		BIT(10)
+#define IPA_TC_EQ			BIT(11)
+#define IPA_FL_EQ			BIT(12)
+#define IPA_IHL_OFFSET_MEQ32_1		BIT(13)
+#define IPA_METADATA_COMPARE		BIT(14)
+#define IPA_IPV4_IS_FRAG		BIT(15)
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN_MAX 4
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+#define IPA_RX_POOL_CEIL 24
+#define IPA_RX_SKB_SIZE 2048
+
+#define IPA_DFLT_HDR_NAME "ipa_excp_hdr"
+
+#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+
+#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
+	(((start_ofst) + 127) & ~127)
+#define IPA_RT_FLT_HW_RULE_BUF_SIZE	(128)
+
+/**
+ * enum ipa_sys_pipe - 5 A5-IPA pipes
+ *
+ * 5 A5-IPA pipes (all system mode)
+ */
+enum ipa_sys_pipe {
+	IPA_A5_UNUSED,
+	IPA_A5_CMD,
+	IPA_A5_LAN_WAN_OUT,
+	IPA_A5_LAN_WAN_IN,
+	IPA_A5_WLAN_AMPDU_OUT,
+	IPA_A5_SYS_MAX
+};
+
+/**
+ * enum ipa_operating_mode - IPA operating mode
+ *
+ * IPA operating mode
+ */
+enum ipa_operating_mode {
+	IPA_MODE_USB_DONGLE,
+	IPA_MODE_MSM,
+	IPA_MODE_EXT_APPS,
+	IPA_MODE_MOBILE_AP_WAN,
+	IPA_MODE_MOBILE_AP_WLAN,
+	IPA_MODE_MOBILE_AP_ETH,
+	IPA_MODE_MAX
+};
+
+/**
+ * enum ipa_bridge_dir - direction of the bridge from air interface perspective
+ *
+ * IPA bridge direction
+ */
+enum ipa_bridge_dir {
+	IPA_DL,
+	IPA_UL,
+	IPA_DIR_MAX
+};
+
+/**
+ * struct ipa_mem_buffer - IPA memory buffer
+ * @base: base
+ * @phys_base: physical base address
+ * @size: size of memory buffer
+ */
+struct ipa_mem_buffer {
+	void *base;
+	dma_addr_t phys_base;
+	u32 size;
+};
+
+/**
+ * struct ipa_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ */
+struct ipa_flt_entry {
+	struct list_head link;
+	struct ipa_flt_rule rule;
+	u32 cookie;
+	struct ipa_flt_tbl *tbl;
+	struct ipa_rt_tbl *rt_tbl;
+	u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ */
+struct ipa_rt_tbl {
+	struct list_head link;
+	struct list_head head_rt_rule_list;
+	char name[IPA_RESOURCE_NAME_MAX];
+	u32 idx;
+	u32 rule_cnt;
+	u32 ref_cnt;
+	struct ipa_rt_tbl_set *set;
+	u32 cookie;
+	bool in_sys;
+	u32 sz;
+	struct ipa_mem_buffer curr_mem;
+	struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @is_partial: flag indicating if header table entry is partial
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of the header entry
+ */
+struct ipa_hdr_entry {
+	struct list_head link;
+	u8 hdr[IPA_HDR_MAX_SIZE];
+	u32 hdr_len;
+	char name[IPA_RESOURCE_NAME_MAX];
+	u8 is_partial;
+	struct ipa_hdr_offset_entry *offset_entry;
+	u32 cookie;
+	u32 ref_cnt;
+};
+
+/**
+ * struct ipa_hdr_offset_entry - IPA header offset entry
+ * @link: entry's link in global header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa_hdr_offset_entry {
+	struct list_head link;
+	u32 offset;
+	u32 bin;
+};
+
+/**
+ * struct ipa_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the current end of the table (size in bytes / next free offset)
+ */
+struct ipa_hdr_tbl {
+	struct list_head head_hdr_entry_list;
+	struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+	u32 hdr_cnt;
+	u32 end;
+};
+
+/**
+ * struct ipa_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter table
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ */
+struct ipa_flt_tbl {
+	struct list_head head_flt_rule_list;
+	u32 rule_cnt;
+	bool in_sys;
+	u32 sz;
+	struct ipa_mem_buffer curr_mem;
+	struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header entry
+ * @hw_len: the entry's size in HW format
+ */
+struct ipa_rt_entry {
+	struct list_head link;
+	struct ipa_rt_rule rule;
+	u32 cookie;
+	struct ipa_rt_tbl *tbl;
+	struct ipa_hdr_entry *hdr;
+	u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa_rt_tbl_set {
+	struct list_head head_rt_tbl_list;
+	u32 tbl_cnt;
+};
+
+/**
+ * struct ipa_tree_node - handle database entry
+ * @node: RB node
+ * @hdl: handle
+ */
+struct ipa_tree_node {
+	struct rb_node node;
+	u32 hdl;
+};
+
+/**
+ * struct ipa_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information
+ * @notify: user provided CB for EP events notification
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ */
+struct ipa_ep_context {
+	int valid;
+	enum ipa_client_type client;
+	struct sps_pipe *ep_hdl;
+	struct ipa_ep_cfg cfg;
+	u32 dst_pipe_index;
+	u32 rt_tbl_idx;
+	struct sps_connect connect;
+	void *priv;
+	void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data);
+	bool desc_fifo_in_pipe_mem;
+	bool data_fifo_in_pipe_mem;
+	u32 desc_fifo_pipe_mem_ofst;
+	u32 data_fifo_pipe_mem_ofst;
+	bool desc_fifo_client_allocated;
+	bool data_fifo_client_allocated;
+};
+
+/**
+ * struct ipa_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ * @wait_desc_list: used to hold completed Tx packets
+ *
+ * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN
+ */
+struct ipa_sys_context {
+	struct list_head head_desc_list;
+	u32 len;
+	spinlock_t spinlock;
+	struct sps_register_event event;
+	struct ipa_ep_context *ep;
+	struct list_head wait_desc_list;
+};
+
+/**
+ * enum ipa_desc_type - IPA descriptor type
+ *
+ * IPA descriptor types, IPA supports DD and ICD but no CD
+ */
+enum ipa_desc_type {
+	IPA_DATA_DESC,
+	IPA_DATA_DESC_SKB,
+	IPA_IMM_CMD_DESC
+};
+
+/**
+ * struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: info for the skb or immediate command param
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ * >1 and <0xFFFF for first of a "multiple" transfer,
+ * 0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ */
+struct ipa_tx_pkt_wrapper {
+	enum ipa_desc_type type;
+	struct ipa_mem_buffer mem;
+	struct work_struct work;
+	struct list_head link;
+	void (*callback)(void *user1, void *user2);
+	void *user1;
+	void *user2;
+	struct ipa_sys_context *sys;
+	struct ipa_mem_buffer mult;
+	u16 cnt;
+	void *bounce;
+};
+
+/**
+ * struct ipa_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb
+ * or kmalloc'ed immediate command parameters/plain old data
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa_desc {
+	enum ipa_desc_type type;
+	void *pyld;
+	u16 len;
+	u16 opcode;
+	void (*callback)(void *user1, void *user2);
+	void *user1;
+	void *user2;
+	struct completion xfer_done;
+};
+
+/**
+ * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @skb: skb
+ * @dma_address: DMA address of this Rx packet
+ * @work: work struct for current Rx packet
+ * @link: linked to the Rx packets on that pipe
+ * @len: how many bytes are copied into skb's flat buffer
+ */
+struct ipa_rx_pkt_wrapper {
+	struct sk_buff *skb;
+	dma_addr_t dma_address;
+	struct work_struct work;
+	struct list_head link;
+	u16 len;
+};
+
+/**
+ * struct ipa_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the device
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ */
+struct ipa_nat_mem {
+	struct class *class;
+	struct device *dev;
+	struct cdev cdev;
+	dev_t dev_num;
+	void *vaddr;
+	dma_addr_t dma_handle;
+	size_t size;
+	bool is_mapped;
+	bool is_sys_mem;
+	bool is_dev_init;
+	struct mutex lock;
+};
+
+/**
+ * struct ipa_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the device
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @glob_flt_tbl: global filter table
+ * @hdr_tbl: IPA header table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @tree_node_cache: tree nodes cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa_sys_context
+ * @sys: IPA sys context for system-bam pipes
+ * @rx_wq: Rx packets work queue
+ * @tx_wq: Tx packets work queue
+ * @smem_sz: shared memory size
+ * @hdr_hdl_tree: header handles tree
+ * @rt_rule_hdl_tree: routing rule handles tree
+ * @rt_tbl_hdl_tree: routing table handles tree
+ * @flt_rule_hdl_tree: filtering rule handles tree
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @polling_mode: 1 - pure polling mode; 0 - interrupt+polling mode
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @curr_polling_state: current polling state
+ * @poll_work: polling work
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_mem: header memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @empty_rt_tbl_mem: empty routing tables memory
+ * @pipe_mem_pool: pipe memory pool
+ * @one_kb_no_straddle_pool: one kb no straddle pool
+ * @ipa_active_clients: number of currently active IPA clients
+ * @clnt_hdl_cmd: client handle for the A5 command pipe
+ * @clnt_hdl_data_in: client handle for the data in pipe
+ * @clnt_hdl_data_out: client handle for the data out pipe
+ * @a5_pipe_index: A5 (apps) pipe index
+ *
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa_context {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+	u32 bam_handle;
+	struct ipa_ep_context ep[IPA_NUM_PIPES];
+	struct ipa_flt_tbl flt_tbl[IPA_NUM_PIPES][IPA_IP_MAX];
+	enum ipa_operating_mode mode;
+	void __iomem *mmio;
+	u32 ipa_wrapper_base;
+	struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
+	struct ipa_hdr_tbl hdr_tbl;
+	struct ipa_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+	struct ipa_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+	struct kmem_cache *flt_rule_cache;
+	struct kmem_cache *rt_rule_cache;
+	struct kmem_cache *hdr_cache;
+	struct kmem_cache *hdr_offset_cache;
+	struct kmem_cache *rt_tbl_cache;
+	struct kmem_cache *tx_pkt_wrapper_cache;
+	struct kmem_cache *rx_pkt_wrapper_cache;
+	struct kmem_cache *tree_node_cache;
+	unsigned long rt_idx_bitmap[IPA_IP_MAX];
+	struct mutex lock;
+	struct ipa_sys_context sys[IPA_A5_SYS_MAX];
+	struct workqueue_struct *rx_wq;
+	struct workqueue_struct *tx_wq;
+	u16 smem_sz;
+	struct rb_root hdr_hdl_tree;
+	struct rb_root rt_rule_hdl_tree;
+	struct rb_root rt_tbl_hdl_tree;
+	struct rb_root flt_rule_hdl_tree;
+	struct ipa_nat_mem nat_mem;
+	u32 excp_hdr_hdl;
+	u32 dflt_v4_rt_rule_hdl;
+	u32 dflt_v6_rt_rule_hdl;
+	bool polling_mode;
+	uint aggregation_type;
+	uint aggregation_byte_limit;
+	uint aggregation_time_limit;
+	uint curr_polling_state;
+	struct delayed_work poll_work;
+	bool hdr_tbl_lcl;
+	struct ipa_mem_buffer hdr_mem;
+	bool ip4_rt_tbl_lcl;
+	bool ip6_rt_tbl_lcl;
+	bool ip4_flt_tbl_lcl;
+	bool ip6_flt_tbl_lcl;
+	struct ipa_mem_buffer empty_rt_tbl_mem;
+	struct gen_pool *pipe_mem_pool;
+	struct dma_pool *one_kb_no_straddle_pool;
+	atomic_t ipa_active_clients;
+	u32 clnt_hdl_cmd;
+	u32 clnt_hdl_data_in;
+	u32 clnt_hdl_data_out;
+	u8 a5_pipe_index;
+};
+
+/**
+ * struct ipa_route - IPA route
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ */
+struct ipa_route {
+	u32 route_dis;
+	u32 route_def_pipe;
+	u32 route_def_hdr_table;
+	u32 route_def_hdr_ofst;
+};
+
+/**
+ * enum ipa_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa_pipe_mem_type {
+	IPA_SPS_PIPE_MEM = 0,
+	IPA_PRIVATE_MEM  = 1,
+	IPA_SYSTEM_MEM   = 2,
+};
+
+/**
+ * enum a2_mux_pipe_direction - IPA-A2 pipe direction
+ */
+enum a2_mux_pipe_direction {
+	A2_TO_IPA = 0,
+	IPA_TO_A2 = 1
+};
+
+/**
+ * struct a2_mux_pipe_connection - A2 MUX pipe connection
+ * @src_phy_addr: source physical address
+ * @src_pipe_index: source pipe index
+ * @dst_phy_addr: destination physical address
+ * @dst_pipe_index: destination pipe index
+ * @mem_type: pipe memory type
+ * @data_fifo_base_offset: data FIFO base offset
+ * @data_fifo_size: data FIFO size
+ * @desc_fifo_base_offset: descriptors FIFO base offset
+ * @desc_fifo_size: descriptors FIFO size
+ */
+struct a2_mux_pipe_connection {
+	int			src_phy_addr;
+	int			src_pipe_index;
+	int			dst_phy_addr;
+	int			dst_pipe_index;
+	enum ipa_pipe_mem_type	mem_type;
+	int			data_fifo_base_offset;
+	int			data_fifo_size;
+	int			desc_fifo_base_offset;
+	int			desc_fifo_size;
+};
+
+extern struct ipa_context *ipa_ctx;
+
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
+				struct a2_mux_pipe_connection *pipe_connect);
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+		u32 *consumer_handle);
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc);
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc);
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+		       enum ipa_client_type client);
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+			 const struct ipa_rule_attrib *attrib,
+			 u8 **buf,
+			 u16 *en_rule);
+u8 *ipa_write_32(u32 w, u8 *dest);
+u8 *ipa_write_16(u16 hw, u8 *dest);
+u8 *ipa_write_8(u8 b, u8 *dest);
+u8 *ipa_pad_to_32(u8 *dest);
+int ipa_init_hw(void);
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+void ipa_dump(void);
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem);
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+void ipa_debugfs_init(void);
+void ipa_debugfs_remove(void);
+
+/*
+ * The functions below read from / write to IPA local memory, a.k.a. device
+ * memory. The argument order is deliberately different from that of the
+ * ipa_write_* functions, which operate on system memory.
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram);
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram);
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram);
+void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count);
+void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count);
+void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count);
+void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count);
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count);
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count);
+
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data);
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl);
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+	ipa_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
+
+int ipa_cfg_route(struct ipa_route *route);
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr);
+void ipa_replenish_rx_cache(void);
+void ipa_cleanup_rx(void);
+int ipa_cfg_filter(u32 disable);
+void ipa_write_done(struct work_struct *work);
+void ipa_handle_rx(struct work_struct *work);
+void ipa_handle_rx_core(void);
+int ipa_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa_pipe_mem_free(u32 ofst, u32 size);
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa_context *ipa_get_ctx(void);
+void ipa_enable_clks(void);
+void ipa_disable_clks(void);
+
+static inline u32 ipa_read_reg(void *base, u32 offset)
+{
+	u32 val = ioread32(base + offset);
+	IPADBG("0x%x(va) read reg 0x%x r_val 0x%x.\n",
+		(u32)base, offset, val);
+	return val;
+}
+
+static inline void ipa_write_reg(void *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+	IPADBG("0x%x(va) write reg 0x%x w_val 0x%x.\n",
+		(u32)base, offset, val);
+}
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+int ipa_bridge_setup(enum ipa_bridge_dir dir);
+int ipa_bridge_teardown(enum ipa_bridge_dir dir);
+
+#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_nat.c b/drivers/platform/msm/ipa/ipa_nat.c
new file mode 100644
index 0000000..c13c53a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_nat.c
@@ -0,0 +1,466 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET  0
+#define IPA_NAT_PHYS_MEM_SIZE  IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_SYSTEM_MEMORY  0
+#define IPA_NAT_SHARED_MEMORY  1
+
+static int ipa_nat_vma_fault_remap(
+	 struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	IPADBG("\n");
+	vmf->page = NULL;
+
+	return VM_FAULT_SIGBUS;
+}
+
+/* VMA related file operations functions */
+static struct vm_operations_struct ipa_nat_remap_vm_ops = {
+	.fault = ipa_nat_vma_fault_remap,
+};
+
+static int ipa_nat_open(struct inode *inode, struct file *filp)
+{
+	struct ipa_nat_mem *nat_ctx;
+	IPADBG("\n");
+	nat_ctx = container_of(inode->i_cdev, struct ipa_nat_mem, cdev);
+	filp->private_data = nat_ctx;
+	IPADBG("return\n");
+	return 0;
+}
+
+static int ipa_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long vsize = vma->vm_end - vma->vm_start;
+	struct ipa_nat_mem *nat_ctx = (struct ipa_nat_mem *)filp->private_data;
+	unsigned long phys_addr;
+	int result;
+
+	mutex_lock(&nat_ctx->lock);
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("Mapping system memory\n");
+		if (nat_ctx->is_mapped) {
+			IPAERR("mapping already exists, only 1 supported\n");
+			result = -EINVAL;
+			goto bail;
+		}
+		IPADBG("map sz=0x%x\n", nat_ctx->size);
+		result =
+			dma_mmap_coherent(
+				 NULL, vma,
+				 nat_ctx->vaddr, nat_ctx->dma_handle,
+				 nat_ctx->size);
+
+		if (result) {
+			IPAERR("unable to map memory. Err:%d\n", result);
+			goto bail;
+		}
+	} else {
+		IPADBG("Mapping shared(local) memory\n");
+		IPADBG("map sz=0x%lx\n", vsize);
+		phys_addr = ipa_ctx->ipa_wrapper_base + IPA_REG_BASE_OFST +
+			IPA_SRAM_DIRECT_ACCESS_n_OFST(IPA_NAT_PHYS_MEM_OFFSET);
+
+		if (remap_pfn_range(
+			 vma, vma->vm_start,
+			 phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+			IPAERR("remap failed\n");
+			result = -EAGAIN;
+			goto bail;
+		}
+
+	}
+	nat_ctx->is_mapped = true;
+	vma->vm_ops = &ipa_nat_remap_vm_ops;
+	IPADBG("return\n");
+	result = 0;
+bail:
+	mutex_unlock(&nat_ctx->lock);
+	return result;
+}
+
+static const struct file_operations ipa_nat_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa_nat_open,
+	.mmap = ipa_nat_mmap
+};
+
+/**
+ * allocate_nat_device() - Allocates memory for the NAT device
+ * @mem:	[in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+	int result;
+
+	IPADBG("passed memory size %d\n", mem->size);
+
+	mutex_lock(&nat_ctx->lock);
+	if (mem->size <= 0 || !strlen(mem->dev_name)
+			|| nat_ctx->is_dev_init == true) {
+		IPADBG("Invalid Parameters or device is already init\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+		IPADBG("Allocating system memory\n");
+		nat_ctx->is_sys_mem = true;
+		nat_ctx->vaddr =
+		   dma_alloc_coherent(NULL, mem->size, &nat_ctx->dma_handle,
+				       gfp_flags);
+		if (nat_ctx->vaddr == NULL) {
+			IPAERR("memory alloc failed\n");
+			result = -ENOMEM;
+			goto bail;
+		}
+		nat_ctx->size = mem->size;
+	} else {
+		IPADBG("using shared(local) memory\n");
+		nat_ctx->is_sys_mem = false;
+	}
+
+	nat_ctx->class = class_create(THIS_MODULE, mem->dev_name);
+	if (IS_ERR(nat_ctx->class)) {
+		IPAERR("unable to create the class\n");
+		result = -ENODEV;
+		goto vaddr_alloc_fail;
+	}
+	result = alloc_chrdev_region(&nat_ctx->dev_num,
+					0,
+					1,
+					mem->dev_name);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto alloc_chrdev_region_fail;
+	}
+
+	nat_ctx->dev =
+	   device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+			 mem->dev_name);
+
+	if (IS_ERR(nat_ctx->dev)) {
+		IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+		result = -ENODEV;
+		goto device_create_fail;
+	}
+
+	cdev_init(&nat_ctx->cdev, &ipa_nat_fops);
+	nat_ctx->cdev.owner = THIS_MODULE;
+	nat_ctx->cdev.ops = &ipa_nat_fops;
+
+	result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+	if (result) {
+		IPAERR("cdev_add err=%d\n", -result);
+		goto cdev_add_fail;
+	}
+	nat_ctx->is_dev_init = true;
+	IPADBG("IPA NAT driver init successfully\n");
+	result = 0;
+	goto bail;
+
+cdev_add_fail:
+	device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+	unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+	class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+	if (nat_ctx->vaddr) {
+		IPADBG("Releasing system memory\n");
+		dma_free_coherent(
+			 NULL, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->vaddr = NULL;
+		nat_ctx->dma_handle = 0;
+		nat_ctx->size = 0;
+	}
+bail:
+	mutex_unlock(&nat_ctx->lock);
+
+	return result;
+}
+
+/* IOCTL function handlers */
+/**
+ * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_ip_v4_nat_init *cmd;
+	u16 size = sizeof(struct ipa_ip_v4_nat_init);
+	int result;
+
+	IPADBG("\n");
+	if (init->tbl_index < 0 || init->table_entries <= 0) {
+		IPADBG("Invalid table index or number of entries\n");
+		result = -EPERM;
+		goto bail;
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("Failed to alloc immediate command object\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+	if (ipa_ctx->nat_mem.vaddr) {
+		IPADBG("using system memory for nat table\n");
+		cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+		cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+		cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY;
+		cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY;
+
+		cmd->ipv4_rules_addr =
+			ipa_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+		IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+		cmd->ipv4_expansion_rules_addr =
+		   ipa_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+		IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+		cmd->index_table_addr =
+			ipa_ctx->nat_mem.dma_handle + init->index_offset;
+		IPADBG("index_offset:0x%x\n", init->index_offset);
+
+		cmd->index_table_expansion_addr =
+		   ipa_ctx->nat_mem.dma_handle + init->index_expn_offset;
+		IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+	} else {
+		IPADBG("using shared(local) memory for nat table\n");
+		cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+		cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+		cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY;
+		cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY;
+
+		cmd->ipv4_rules_addr =
+			init->ipv4_rules_offset + IPA_RAM_NAT_OFST;
+
+		cmd->ipv4_expansion_rules_addr =
+			init->expn_rules_offset + IPA_RAM_NAT_OFST;
+
+		cmd->index_table_addr = init->index_offset + IPA_RAM_NAT_OFST;
+
+		cmd->index_table_expansion_addr =
+			init->index_expn_offset + IPA_RAM_NAT_OFST;
+	}
+	cmd->table_index = init->tbl_index;
+	IPADBG("Table index:0x%x\n", cmd->table_index);
+	cmd->size_base_tables = init->table_entries;
+	IPADBG("Base Table size:0x%x\n", cmd->size_base_tables);
+	cmd->size_expansion_tables = init->expn_table_entries;
+	IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables);
+	cmd->public_ip_addr = init->ip_addr;
+	IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr);
+	desc.opcode = IPA_IP_V4_NAT_INIT;
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.callback = NULL;
+	desc.user1 = NULL;
+	desc.user2 = NULL;
+	desc.pyld = (void *)cmd;
+	desc.len = size;
+	IPADBG("posting v4 init command\n");
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto free_cmd;
+	}
+
+	IPADBG("return\n");
+	result = 0;
+free_cmd:
+	kfree(cmd);
+bail:
+	return result;
+}
+
+/**
+ * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+	struct ipa_nat_dma *cmd = NULL;
+	struct ipa_desc *desc = NULL;
+	u16 size = 0, cnt = 0;
+	int ret = 0;
+
+	IPADBG("\n");
+	if (dma->entries <= 0) {
+		IPADBG("Invalid number of commands\n");
+		ret = -EPERM;
+		goto bail;
+	}
+	size = sizeof(struct ipa_desc) * dma->entries;
+	desc = kmalloc(size, GFP_KERNEL);
+	if (desc == NULL) {
+		IPAERR("Failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+	size = sizeof(struct ipa_nat_dma) * dma->entries;
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (cmd == NULL) {
+		IPAERR("Failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+	for (cnt = 0; cnt < dma->entries; cnt++) {
+		cmd[cnt].table_index = dma->dma[cnt].table_index;
+		cmd[cnt].base_addr = dma->dma[cnt].base_addr;
+		cmd[cnt].offset = dma->dma[cnt].offset;
+		cmd[cnt].data = dma->dma[cnt].data;
+		desc[cnt].type = IPA_IMM_CMD_DESC;
+		desc[cnt].opcode = IPA_NAT_DMA;
+		desc[cnt].callback = NULL;
+		desc[cnt].user1 = NULL;
+
+		desc[cnt].user2 = NULL;
+
+		desc[cnt].len = sizeof(struct ipa_nat_dma);
+		desc[cnt].pyld = (void *)&cmd[cnt];
+	}
+	IPADBG("posting dma command with entries %d\n", dma->entries);
+	ret = ipa_send_cmd(dma->entries, desc);
+	if (ret == -EPERM)
+		IPAERR("Fail to send immediate command\n");
+
+bail:
+	kfree(cmd);
+	kfree(desc);
+
+	return ret;
+}
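+
+/*
+ * Hedged usage sketch for ipa_nat_dma_cmd(); the exact uapi layout of
+ * struct ipa_ioc_nat_dma_cmd is defined elsewhere, so only the members
+ * referenced above are used and all values are placeholders:
+ *
+ *	struct ipa_ioc_nat_dma_cmd *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->dma[0]), GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->entries = 1;
+ *	req->dma[0].table_index = 0;
+ *	req->dma[0].base_addr = 0;	(table selector, placeholder)
+ *	req->dma[0].offset = 0x10;
+ *	req->dma[0].data = 0x1;
+ *	if (ipa_nat_dma_cmd(req))
+ *		IPAERR("NAT DMA command failed\n");
+ *	kfree(req);
+ */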
+
+/**
+ * ipa_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx:	[in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx)
+{
+	IPADBG("\n");
+	mutex_lock(&nat_ctx->lock);
+
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("freeing the dma memory\n");
+		dma_free_coherent(
+			 NULL, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->size = 0;
+		nat_ctx->vaddr = NULL;
+	}
+	nat_ctx->is_mapped = false;
+	nat_ctx->is_sys_mem = false;
+	cdev_del(&nat_ctx->cdev);
+	device_destroy(nat_ctx->class, nat_ctx->dev_num);
+	unregister_chrdev_region(nat_ctx->dev_num, 1);
+	class_destroy(nat_ctx->class);
+	nat_ctx->is_dev_init = false;
+
+	mutex_unlock(&nat_ctx->lock);
+	IPADBG("return\n");
+	return;
+}
+
+/**
+ * ipa_nat_del_cmd() - Delete a NAT table
+ * @del:	[in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_ip_v4_nat_init *cmd;
+	u16 size = sizeof(struct ipa_ip_v4_nat_init);
+	u8 mem_type = IPA_NAT_SHARED_MEMORY;
+	u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+	int result;
+
+	IPADBG("\n");
+	if (del->table_index < 0 || del->public_ip_addr == 0) {
+		IPADBG("Bad Parameter\n");
+		result = -EPERM;
+		goto bail;
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (cmd == NULL) {
+		IPAERR("Failed to alloc immediate command object\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+	cmd->table_index = del->table_index;
+	cmd->ipv4_rules_addr = base_addr;
+	cmd->ipv4_rules_addr_type = mem_type;
+	cmd->ipv4_expansion_rules_addr = base_addr;
+	cmd->ipv4_expansion_rules_addr_type = mem_type;
+	cmd->index_table_addr = base_addr;
+	cmd->index_table_addr_type = mem_type;
+	cmd->index_table_expansion_addr = base_addr;
+	cmd->index_table_expansion_addr_type = mem_type;
+	cmd->size_base_tables = 0;
+	cmd->size_expansion_tables = 0;
+	cmd->public_ip_addr = del->public_ip_addr;
+
+	desc.opcode = IPA_IP_V4_NAT_INIT;
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.callback = NULL;
+	desc.user1 = NULL;
+	desc.user2 = NULL;
+	desc.pyld = (void *)cmd;
+	desc.len = size;
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto free_mem;
+	}
+
+	ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem);
+	IPADBG("return\n");
+	result = 0;
+free_mem:
+	kfree(cmd);
+bail:
+	return result;
+}
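+
+/*
+ * Hedged usage sketch for ipa_nat_del_cmd(); the table index and public IP
+ * address are placeholders:
+ *
+ *	struct ipa_ioc_v4_nat_del del;
+ *
+ *	memset(&del, 0, sizeof(del));
+ *	del.table_index = 0;
+ *	del.public_ip_addr = 0xC0A80001;	(192.168.0.1, example only)
+ *	if (ipa_nat_del_cmd(&del))
+ *		IPAERR("failed to delete the NAT table\n");
+ */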
diff --git a/drivers/platform/msm/ipa/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_ram_mmap.h
new file mode 100644
index 0000000..000718b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_ram_mmap.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RAM_MMAP_H_
+#define _IPA_RAM_MMAP_H_
+
+/*
+ * This header defines the memory map of the IPA RAM (not all 8K is available
+ * for SW use); the first 2K are set aside for NAT.
+ */
+
+#define IPA_RAM_NAT_OFST    0
+#define IPA_RAM_NAT_SIZE    2048
+#define IPA_RAM_HDR_OFST    2048
+#define IPA_RAM_HDR_SIZE    256
+#define IPA_RAM_V4_FLT_OFST (IPA_RAM_HDR_OFST + IPA_RAM_HDR_SIZE)
+#define IPA_RAM_V4_FLT_SIZE 1024
+#define IPA_RAM_V4_RT_OFST  (IPA_RAM_V4_FLT_OFST + IPA_RAM_V4_FLT_SIZE)
+#define IPA_RAM_V4_RT_SIZE  1024
+#define IPA_RAM_V6_FLT_OFST (IPA_RAM_V4_RT_OFST + IPA_RAM_V4_RT_SIZE)
+#define IPA_RAM_V6_FLT_SIZE 1024
+#define IPA_RAM_V6_RT_OFST  (IPA_RAM_V6_FLT_OFST + IPA_RAM_V6_FLT_SIZE)
+#define IPA_RAM_V6_RT_SIZE  1024
+#define IPA_RAM_END_OFST    (IPA_RAM_V6_RT_OFST + IPA_RAM_V6_RT_SIZE)
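+
+/*
+ * With the sizes above the map resolves to the following byte offsets:
+ * NAT 0..2047, HDR 2048..2303, V4 FLT 2304..3327, V4 RT 3328..4351,
+ * V6 FLT 4352..5375, V6 RT 5376..6399, END = 6400.
+ */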
+
+#endif /* _IPA_RAM_MMAP_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_reg.h b/drivers/platform/msm/ipa/ipa_reg.h
new file mode 100644
index 0000000..61913b6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_reg.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __IPA_REG_H__
+#define __IPA_REG_H__
+
+/*
+ * IPA's BAM specific registers
+ */
+
+#define IPA_BAM_REG_BASE_OFST 0x00004000
+
+#define IPA_BAM_CNFG_BITS_OFST 0x7c
+#define IPA_BAM_REMAP_SIZE (0x1000)
+
+/*
+ * IPA's core specific registers
+ */
+
+#define IPA_REG_BASE_OFST 0x00020000
+
+#define IPA_COMP_HW_VERSION_OFST 0x00000030
+#define IPA_COMP_HW_VERSION_RMSK 0xffffffff
+#define IPA_COMP_HW_VERSION_MAJOR_BMSK 0xff000000
+#define IPA_COMP_HW_VERSION_MAJOR_SHFT 0x18
+#define IPA_COMP_HW_VERSION_MINOR_BMSK 0xff0000
+#define IPA_COMP_HW_VERSION_MINOR_SHFT 0x10
+#define IPA_COMP_HW_VERSION_STEP_BMSK 0xffff
+#define IPA_COMP_HW_VERSION_STEP_SHFT 0x0
+
+#define IPA_VERSION_OFST 0x00000034
+#define IPA_VERSION_RMSK 0xffffffff
+#define IPA_VERSION_IPA_R_REV_BMSK 0xff000000
+#define IPA_VERSION_IPA_R_REV_SHFT 0x18
+#define IPA_VERSION_IPA_Q_REV_BMSK 0xff0000
+#define IPA_VERSION_IPA_Q_REV_SHFT 0x10
+#define IPA_VERSION_IPA_P_REV_BMSK 0xff00
+#define IPA_VERSION_IPA_P_REV_SHFT 0x8
+#define IPA_VERSION_IPA_ECO_REV_BMSK 0xff
+#define IPA_VERSION_IPA_ECO_REV_SHFT 0x0
+
+#define IPA_COMP_CFG_OFST 0x00000038
+#define IPA_COMP_CFG_RMSK 0x1
+#define IPA_COMP_CFG_ENABLE_BMSK 0x1
+#define IPA_COMP_CFG_ENABLE_SHFT 0x0
+
+#define IPA_COMP_SW_RESET_OFST 0x0000003c
+#define IPA_COMP_SW_RESET_RMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_BMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_SHFT 0x0
+
+#define IPA_CLKON_CFG_OFST 0x00000040
+#define IPA_CLKON_CFG_RMSK 0xf
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK 0x8
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT 0x3
+#define IPA_CLKON_CFG_CGC_OPEN_TX_BMSK 0x4
+#define IPA_CLKON_CFG_CGC_OPEN_TX_SHFT 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_BMSK 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_SHFT 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_BMSK 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_EN_OFST 0x00000044
+#define IPA_HEAD_OF_LINE_BLOCK_EN_RMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_BMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_OFST 0x00000048
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_RMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_BMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_SHFT 0x0
+
+#define IPA_ROUTE_OFST 0x0000004c
+#define IPA_ROUTE_RMSK 0x1ffff
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+
+#define IPA_FILTER_OFST 0x00000050
+#define IPA_FILTER_RMSK 0x1
+#define IPA_FILTER_FILTER_EN_BMSK 0x1
+#define IPA_FILTER_FILTER_EN_SHFT 0x0
+
+#define IPA_MASTER_PRIORITY_OFST 0x00000054
+#define IPA_MASTER_PRIORITY_RMSK 0xffffffff
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_BMSK 0xc0000000
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_SHFT 0x1e
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_BMSK 0x30000000
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_SHFT 0x1c
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_BMSK 0xc000000
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_SHFT 0x1a
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_BMSK 0x3000000
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_SHFT 0x18
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_BMSK 0xc00000
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_SHFT 0x16
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_BMSK 0x300000
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_SHFT 0x14
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_BMSK 0xc0000
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_SHFT 0x12
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_BMSK 0x30000
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_SHFT 0x10
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_BMSK 0xc000
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_SHFT 0xe
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_BMSK 0x3000
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_SHFT 0xc
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_BMSK 0xc00
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_SHFT 0xa
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_BMSK 0x300
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_SHFT 0x8
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_BMSK 0xc0
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_SHFT 0x6
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_BMSK 0x30
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_SHFT 0x4
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_BMSK 0xc
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_SHFT 0x2
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_BMSK 0x3
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_SHFT 0x0
+
+#define IPA_SHARED_MEM_SIZE_OFST 0x00000058
+#define IPA_SHARED_MEM_SIZE_RMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0
+
+#define IPA_NAT_TIMER_OFST 0x0000005c
+#define IPA_NAT_TIMER_RMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_BMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_SHFT 0x0
+
+#define IPA_NAT_TIMER_RESET_OFST 0x00000060
+#define IPA_NAT_TIMER_RESET_RMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_BMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_SHFT 0x0
+
+#define IPA_ENDP_INIT_NAT_n_OFST(n) (0x00000080 + 0x4 * (n))
+#define IPA_ENDP_INIT_NAT_n_RMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_MAXn 19
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_n_OFST(n) (0x000000e0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_n_RMSK 0x7ffffff
+#define IPA_ENDP_INIT_HDR_n_MAXn 19
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+
+#define IPA_ENDP_INIT_MODE_n_OFST(n) (0x00000140 + 0x4 * (n))
+#define IPA_ENDP_INIT_MODE_n_RMSK 0x7f
+#define IPA_ENDP_INIT_MODE_n_MAXn 19
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x7c
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x2
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x3
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+#define IPA_ENDP_INIT_AGGR_n_OFST(n) (0x000001a0 + 0x4 * (n))
+#define IPA_ENDP_INIT_AGGR_n_RMSK 0x7fff
+#define IPA_ENDP_INIT_AGGR_n_MAXn 19
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_ROUTE_n_OFST(n) (0x00000200 + 0x4 * (n))
+#define IPA_ENDP_INIT_ROUTE_n_RMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_MAXn 19
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_1_OFST 0x00002090
+#define IPA_AGGREGATION_SPARE_REG_1_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094
+#define IPA_AGGREGATION_SPARE_REG_2_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_MODE_MSK 0x1
+#define IPA_AGGREGATION_MODE_SHFT 31
+#define IPA_AGGREGATION_MODE_BMSK 0x7fffffff
+#define IPA_AGGREGATION_QCNCM_SIG0_SHFT 16
+#define IPA_AGGREGATION_QCNCM_SIG1_SHFT 8
+#define IPA_AGGREGATION_QCNCM_SIG_BMSK 0xff000000
+#define IPA_AGGREGATION_SINGLE_NDP_MSK 0x1
+#define IPA_AGGREGATION_SINGLE_NDP_BMSK 0xfffffffe
+
+#define IPA_SRAM_DIRECT_ACCESS_n_OFST(n) (0x00004000 + 0x4 * (n))
+#define IPA_SRAM_DIRECT_ACCESS_n_RMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_MAXn 2047
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_BMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_SHFT 0x0
+
+#endif /* __IPA_REG_H__ */
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
new file mode 100644
index 0000000..c69e1fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -0,0 +1,964 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include "ipa_i.h"
+
+#define IPA_RT_TABLE_INDEX_NOT_FOUND	(-1)
+#define IPA_RT_TABLE_WORD_SIZE		(4)
+#define IPA_RT_INDEX_BITMAP_SIZE	(32)
+#define IPA_RT_TABLE_MEMORY_ALLIGNMENT	(127)
+#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT	(3)
+#define IPA_RT_BIT_MASK			(0x1)
+#define IPA_RT_STATUS_OF_ADD_FAILED	(-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED	(-1)
+
+/**
+ * ipa_generate_rt_hw_rule() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know the
+ *		size of the rule as seen by HW, so a scratch buffer is used
+ *		instead. With this scheme the rule is generated twice: once
+ *		into the scratch buffer to learn its size, and a second time
+ *		into the caller supplied buffer of the required size
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+		struct ipa_rt_entry *entry, u8 *buf)
+{
+	struct ipa_rt_rule_hw_hdr *rule_hdr;
+	const struct ipa_rt_rule *rule =
+		(const struct ipa_rt_rule *)&entry->rule;
+	u16 en_rule = 0;
+	u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+	u8 *start;
+	int pipe_idx;
+
+	memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+	if (buf == NULL)
+		buf = tmp;
+
+	start = buf;
+	rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
+	pipe_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+			entry->rule.dst);
+	if (pipe_idx == -1) {
+		IPAERR("Wrong destination pipe specified in RT rule\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+	rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
+	rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
+	if (entry->hdr)
+		rule_hdr->u.hdr.hdr_offset =
+			entry->hdr->offset_entry->offset >> 2;
+	else
+		rule_hdr->u.hdr.hdr_offset = 0;
+
+	buf += sizeof(struct ipa_rt_rule_hw_hdr);
+	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+		IPAERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+
+	IPADBG("en_rule 0x%x\n", en_rule);
+
+	rule_hdr->u.hdr.en_rule = en_rule;
+	ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (entry->hw_len == 0) {
+		entry->hw_len = buf - start;
+	} else if (entry->hw_len != (buf - start)) {
+		IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%x\n",
+		       entry->hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
+ * @ip: the ip address family type
+ * @hdr_sz: header size
+ * @max_rt_idx: maximal index
+ *
+ * Returns:	required HW routing table size in bytes, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ * the highest bit set in rt_idx_bitmap determines the size of the hdr of the
+ * routing tbl
+ */
+static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
+		int *max_rt_idx)
+{
+	struct ipa_rt_tbl_set *set;
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	u32 total_sz = 0;
+	u32 tbl_sz;
+	u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
+	int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
+	int i;
+
+	*hdr_sz = 0;
+	set = &ipa_ctx->rt_tbl_set[ip];
+
+	for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+		if (bitmap & IPA_RT_BIT_MASK)
+			highest_bit_set = i;
+		bitmap >>= 1;
+	}
+
+	*max_rt_idx = highest_bit_set;
+	if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
+		IPAERR("no rt tbls present\n");
+		total_sz = IPA_RT_TABLE_WORD_SIZE;
+		*hdr_sz = IPA_RT_TABLE_WORD_SIZE;
+		return total_sz;
+	}
+
+	*hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
+	total_sz += *hdr_sz;
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		tbl_sz = 0;
+		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+			if (ipa_generate_rt_hw_rule(ip, entry, NULL)) {
+				IPAERR("failed to find HW RT rule size\n");
+				return -EPERM;
+			}
+			tbl_sz += entry->hw_len;
+		}
+
+		if (tbl_sz)
+			tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;
+
+		if (tbl->in_sys)
+			continue;
+
+		if (tbl_sz) {
+			/* add the terminator */
+			total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
+			/* every rule-set should start at word boundary */
+			total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
+						~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+		}
+	}
+
+	IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+	return total_sz;
+}
+
+/**
+ * ipa_generate_rt_hw_tbl() - generates the routing hardware table
+ * @ip:	[in] the ip address family type
+ * @mem:	[out] buffer to put the routing table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	struct ipa_rt_tbl_set *set;
+	u32 hdr_sz;
+	u32 offset;
+	u8 *hdr;
+	u8 *body;
+	u8 *base;
+	struct ipa_mem_buffer rt_tbl_mem;
+	u8 *rt_tbl_mem_body;
+	int max_rt_idx;
+	int i;
+
+	mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+				~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
+
+	if (mem->size == 0) {
+		IPAERR("rt tbl empty ip=%d\n", ip);
+		goto error;
+	}
+	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+			GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		goto error;
+	}
+
+	memset(mem->base, 0, mem->size);
+
+	/* build the rt tbl in the DMA buffer to submit to IPA HW */
+	base = hdr = (u8 *)mem->base;
+	body = base + hdr_sz;
+
+	/* setup all indices to point to the empty sys rt tbl */
+	for (i = 0; i <= max_rt_idx; i++)
+		ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
+				hdr + (i * IPA_RT_TABLE_WORD_SIZE));
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		offset = body - base;
+		if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
+			IPAERR("offset is not word multiple %d\n", offset);
+			goto proc_err;
+		}
+
+		if (!tbl->in_sys) {
+			/* convert offset to words from bytes */
+			offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+			/* rule is at an offset from base */
+			offset |= IPA_RT_BIT_MASK;
+
+			/* update the hdr at the right index */
+			ipa_write_32(offset, hdr +
+					(tbl->idx * IPA_RT_TABLE_WORD_SIZE));
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (ipa_generate_rt_hw_rule(ip, entry, body)) {
+					IPAERR("failed to gen HW RT rule\n");
+					goto proc_err;
+				}
+				body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			body = ipa_write_32(0, body);
+			if ((u32)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
+				/* advance body to next word boundary */
+				body = body + (IPA_RT_TABLE_WORD_SIZE -
+					      ((u32)body &
+					      IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
+		} else {
+			WARN_ON(tbl->sz == 0);
+			/* allocate memory for the RT tbl */
+			rt_tbl_mem.size = tbl->sz;
+			rt_tbl_mem.base =
+			   dma_alloc_coherent(NULL, rt_tbl_mem.size,
+					   &rt_tbl_mem.phys_base, GFP_KERNEL);
+			if (!rt_tbl_mem.base) {
+				IPAERR("fail to alloc DMA buff of size %d\n",
+						rt_tbl_mem.size);
+				WARN_ON(1);
+				goto proc_err;
+			}
+
+			WARN_ON(rt_tbl_mem.phys_base &
+					IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
+			rt_tbl_mem_body = rt_tbl_mem.base;
+			memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
+			/* update the hdr at the right index */
+			ipa_write_32(rt_tbl_mem.phys_base,
+					hdr + (tbl->idx *
+					IPA_RT_TABLE_WORD_SIZE));
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (ipa_generate_rt_hw_rule(ip, entry,
+							rt_tbl_mem_body)) {
+					IPAERR("failed to gen HW RT rule\n");
+					WARN_ON(1);
+					goto rt_table_mem_alloc_failed;
+				}
+				rt_tbl_mem_body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);
+
+			if (tbl->curr_mem.phys_base) {
+				WARN_ON(tbl->prev_mem.phys_base);
+				tbl->prev_mem = tbl->curr_mem;
+			}
+			tbl->curr_mem = rt_tbl_mem;
+		}
+	}
+
+	return 0;
+
+rt_table_mem_alloc_failed:
+	dma_free_coherent(NULL, rt_tbl_mem.size,
+			  rt_tbl_mem.base, rt_tbl_mem.phys_base);
+proc_err:
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+	return -EPERM;
+}
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_tbl *next;
+	struct ipa_rt_tbl_set *set;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (tbl->prev_mem.phys_base) {
+			IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
+			dma_free_coherent(NULL, tbl->prev_mem.size,
+					tbl->prev_mem.base,
+					tbl->prev_mem.phys_base);
+			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+		}
+	}
+
+	set = &ipa_ctx->reap_rt_tbl_set[ip];
+	list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+		list_del(&tbl->link);
+		WARN_ON(tbl->prev_mem.phys_base != 0);
+		if (tbl->curr_mem.phys_base) {
+			IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
+					ip);
+			dma_free_coherent(NULL, tbl->curr_mem.size,
+					tbl->curr_mem.base,
+					tbl->curr_mem.phys_base);
+			kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+		}
+	}
+}
+
+static int __ipa_commit_rt(enum ipa_ip_type ip)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	void *cmd;
+	struct ipa_ip_v4_routing_init *v4;
+	struct ipa_ip_v6_routing_init *v6;
+	u16 avail;
+	u16 size;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	if (ip == IPA_IP_v4) {
+		avail = IPA_RAM_V4_RT_SIZE;
+		size = sizeof(struct ipa_ip_v4_routing_init);
+	} else {
+		avail = IPA_RAM_V6_RT_SIZE;
+		size = sizeof(struct ipa_ip_v6_routing_init);
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_rt_hw_tbl(ip, mem)) {
+		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (mem->size > avail) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (ip == IPA_IP_v4) {
+		v4 = (struct ipa_ip_v4_routing_init *)cmd;
+		desc.opcode = IPA_IP_V4_ROUTING_INIT;
+		v4->ipv4_rules_addr = mem->phys_base;
+		v4->size_ipv4_rules = mem->size;
+		v4->ipv4_addr = IPA_RAM_V4_RT_OFST;
+	} else {
+		v6 = (struct ipa_ip_v6_routing_init *)cmd;
+		desc.opcode = IPA_IP_V6_ROUTING_INIT;
+		v6->ipv6_rules_addr = mem->phys_base;
+		v6->size_ipv6_rules = mem->size;
+		v6->ipv6_addr = IPA_RAM_V6_RT_OFST;
+	}
+
+	desc.pyld = cmd;
+	desc.len = size;
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	__ipa_reap_sys_rt_tbls(ip);
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->phys_base)
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+	return -EPERM;
+}
+
+/**
+ * __ipa_find_rt_tbl() - find the routing table
+ *			whose name is given as parameter
+ * @ip:	[in] the ip address family type of the wanted routing table
+ * @name:	[in] the name of the wanted routing table
+ *
+ * Returns: the routing table whose name is given as parameter, or NULL if it
+ * doesn't exist
+ */
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+	struct ipa_rt_tbl *entry;
+	struct ipa_rt_tbl_set *set;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+		if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+			return entry;
+	}
+
+	return NULL;
+}
+
+static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+		const char *name)
+{
+	struct ipa_rt_tbl *entry;
+	struct ipa_rt_tbl_set *set;
+	struct ipa_tree_node *node;
+	int i;
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto node_alloc_fail;
+	}
+
+	if (ip >= IPA_IP_MAX || name == NULL) {
+		IPAERR("bad parm\n");
+		goto error;
+	}
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	/* check if this table exists */
+	entry = __ipa_find_rt_tbl(ip, name);
+	if (!entry) {
+		entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
+		if (!entry) {
+			IPAERR("failed to alloc RT tbl object\n");
+			goto error;
+		}
+		/* find a routing tbl index */
+		for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+			if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
+				entry->idx = i;
+				set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
+				break;
+			}
+		}
+		if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+			IPAERR("no free RT tbl indices left\n");
+			goto fail_rt_idx_alloc;
+		}
+
+		INIT_LIST_HEAD(&entry->head_rt_rule_list);
+		INIT_LIST_HEAD(&entry->link);
+		strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+		entry->set = set;
+		entry->cookie = IPA_COOKIE;
+		entry->in_sys = (ip == IPA_IP_v4) ?
+			!ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
+		set->tbl_cnt++;
+		list_add(&entry->link, &set->head_rt_tbl_list);
+
+		IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+				set->tbl_cnt, ip);
+
+		node->hdl = (u32)entry;
+		if (ipa_insert(&ipa_ctx->rt_tbl_hdl_tree, node)) {
+			IPAERR("failed to add to tree\n");
+			WARN_ON(1);
+		}
+	}
+
+	return entry;
+
+fail_rt_idx_alloc:
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+error:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+node_alloc_fail:
+	return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
+{
+	struct ipa_tree_node *node;
+	enum ipa_ip_type ip = IPA_IP_MAX;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parms\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)entry);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	if (!entry->in_sys) {
+		list_del(&entry->link);
+		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+				entry->set->tbl_cnt);
+		kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+	} else {
+		list_move(&entry->link,
+				&ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
+		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+				entry->set->tbl_cnt);
+	}
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	struct ipa_tree_node *node;
+
+	if (rule->hdr_hdl &&
+	    ((ipa_search(&ipa_ctx->hdr_hdl_tree, rule->hdr_hdl) == NULL) ||
+	     ((struct ipa_hdr_entry *)rule->hdr_hdl)->cookie != IPA_COOKIE)) {
+		IPAERR("rt rule does not point to valid hdr\n");
+		goto error;
+	}
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto error;
+	}
+
+	tbl = __ipa_add_rt_tbl(ip, name);
+	if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+		IPAERR("bad params\n");
+		goto fail_rt_tbl_sanity;
+	}
+	/*
+	 * do not allow any rules to be added at end of the "default" routing
+	 * tables
+	 */
+	if (!strncmp(tbl->name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX) &&
+	    (tbl->rule_cnt > 0) && (at_rear != 0)) {
+		IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+		       tbl->rule_cnt, at_rear);
+		goto fail_rt_tbl_sanity;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc RT rule object\n");
+		goto fail_rt_tbl_sanity;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->cookie = IPA_COOKIE;
+	entry->rule = *rule;
+	entry->tbl = tbl;
+	entry->hdr = (struct ipa_hdr_entry *)rule->hdr_hdl;
+	if (at_rear)
+		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+	else
+		list_add(&entry->link, &tbl->head_rt_rule_list);
+	tbl->rule_cnt++;
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+	IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
+	*rule_hdl = (u32)entry;
+
+	node->hdl = *rule_hdl;
+	if (ipa_insert(&ipa_ctx->rt_rule_hdl_tree, node)) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+		goto ipa_insert_failed;
+	}
+
+	return 0;
+
+ipa_insert_failed:
+	list_del(&entry->link);
+	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+fail_rt_tbl_sanity:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+	return -EPERM;
+}
+
+/**
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].rt_rule_hdl)) {
+			IPAERR("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (__ipa_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule);
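+
+/*
+ * Hedged usage sketch for ipa_add_rt_rule(); the uapi layout of
+ * struct ipa_ioc_add_rt_rule and its per-rule element are defined elsewhere,
+ * so only members referenced above are used, and the table name and
+ * destination client are placeholders:
+ *
+ *	struct ipa_ioc_add_rt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->rules[0]), GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->ip = IPA_IP_v4;
+ *	req->num_rules = 1;
+ *	req->commit = 1;
+ *	strlcpy(req->rt_tbl_name, "example_tbl", IPA_RESOURCE_NAME_MAX);
+ *	req->rules[0].at_rear = 1;
+ *	req->rules[0].rule.dst = dst_client;	(any valid consumer client)
+ *	if (ipa_add_rt_rule(req))
+ *		IPAERR("add rt rule failed\n");
+ *	else
+ *		saved_rule_hdl = req->rules[0].rt_rule_hdl;
+ *	kfree(req);
+ */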
+
+static int __ipa_del_rt_rule(u32 rule_hdl)
+{
+	struct ipa_rt_entry *entry = (struct ipa_rt_entry *)rule_hdl;
+	struct ipa_tree_node *node;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->rt_rule_hdl_tree, rule_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
+			entry->tbl->rule_cnt);
+	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry->tbl))
+			IPAERR("fail to del RT tbl\n");
+	}
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+/**
+ * ipa_del_rt_rule() - Remove the specified routing rules from SW and optionally
+ * commit to IPA HW
+ * @hdls:	[inout] set of routing rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	int i;
+	int ret;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del rt rule %i\n", i);
+			hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (__ipa_commit_rt(hdls->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_rt_rule);
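+
+/*
+ * Hedged deletion sketch for ipa_del_rt_rule(); saved_rule_hdl stands for a
+ * rt_rule_hdl previously returned by ipa_add_rt_rule():
+ *
+ *	struct ipa_ioc_del_rt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->hdl[0]), GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->ip = IPA_IP_v4;
+ *	req->num_hdls = 1;
+ *	req->commit = 1;
+ *	req->hdl[0].hdl = saved_rule_hdl;
+ *	if (ipa_del_rt_rule(req))
+ *		IPAERR("del rt rule failed\n");
+ *	kfree(req);
+ */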
+
+/**
+ * ipa_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_rt(enum ipa_ip_type ip)
+{
+	int ret;
+	/*
+	 * issue a commit on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa_commit_flt(ip))
+		return -EPERM;
+
+	mutex_lock(&ipa_ctx->lock);
+	if (__ipa_commit_rt(ip)) {
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_commit_rt);
+
+/**
+ * ipa_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_rt(enum ipa_ip_type ip)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_tbl *tbl_next;
+	struct ipa_rt_tbl_set *set;
+	struct ipa_rt_entry *rule;
+	struct ipa_rt_entry *rule_next;
+	struct ipa_tree_node *node;
+	struct ipa_rt_tbl_set *rset;
+
+	/*
+	 * issue a reset on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa_reset_flt(ip))
+		IPAERR("fail to reset flt ip=%d\n", ip);
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	rset = &ipa_ctx->reap_rt_tbl_set[ip];
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset rt ip=%d\n", ip);
+	list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+		list_for_each_entry_safe(rule, rule_next,
+					 &tbl->head_rt_rule_list, link) {
+			node = ipa_search(&ipa_ctx->rt_rule_hdl_tree,
+					  (u32)rule);
+			if (node == NULL)
+				WARN_ON(1);
+
+			/*
+			 * for the "default" routing tbl, remove all but the
+			 *  last rule
+			 */
+			if (tbl->idx == 0 && tbl->rule_cnt == 1)
+				continue;
+
+			list_del(&rule->link);
+			tbl->rule_cnt--;
+			if (rule->hdr)
+				rule->hdr->ref_cnt--;
+			rule->cookie = 0;
+			kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
+
+			/* remove the handle from the database */
+			rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+			kmem_cache_free(ipa_ctx->tree_node_cache, node);
+		}
+
+		node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)tbl);
+		if (node  == NULL)
+			WARN_ON(1);
+
+		/* do not remove the "default" routing tbl which has index 0 */
+		if (tbl->idx != 0) {
+			if (!tbl->in_sys) {
+				list_del(&tbl->link);
+				set->tbl_cnt--;
+				clear_bit(tbl->idx,
+					  &ipa_ctx->rt_idx_bitmap[ip]);
+				IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+				kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+			} else {
+				list_move(&tbl->link, &rset->head_rt_tbl_list);
+				clear_bit(tbl->idx,
+					  &ipa_ctx->rt_idx_bitmap[ip]);
+				set->tbl_cnt--;
+				IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+			}
+			/* remove the handle from the database */
+			rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+			kmem_cache_free(ipa_ctx->tree_node_cache, node);
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_reset_rt);
+
+/**
+ * ipa_get_rt_tbl() - lookup the specified routing table and return handle if it
+ * exists; if lookup succeeds, the routing table ref cnt is increased
+ * @lookup:	[inout] routing table to lookup and its handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *	Caller should call ipa_put_rt_tbl later if this function succeeds
+ */
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	struct ipa_rt_tbl *entry;
+	int result = -EFAULT;
+
+	if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_add_rt_tbl(lookup->ip, lookup->name);
+	if (entry && entry->cookie == IPA_COOKIE) {
+		entry->ref_cnt++;
+		lookup->hdl = (uint32_t)entry;
+
+		/* commit for get */
+		if (__ipa_commit_rt(lookup->ip))
+			IPAERR("fail to commit RT tbl\n");
+
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_get_rt_tbl);
+
+/**
+ * ipa_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl:	[in] the routing table handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	struct ipa_rt_tbl *entry = (struct ipa_rt_tbl *)rt_tbl_hdl;
+	struct ipa_tree_node *node;
+	enum ipa_ip_type ip = IPA_IP_MAX;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE) ||
+			entry->ref_cnt == 0) {
+		IPAERR("bad parms\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rt_tbl_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	mutex_lock(&ipa_ctx->lock);
+	entry->ref_cnt--;
+	if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry))
+			IPAERR("fail to del RT tbl\n");
+		/* commit for put */
+		if (__ipa_commit_rt(ip))
+			IPAERR("fail to commit RT tbl\n");
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_put_rt_tbl);
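+
+/*
+ * Hedged sketch of the ipa_get_rt_tbl()/ipa_put_rt_tbl() pair; the table
+ * name is a placeholder and lookup.hdl would typically be stored (for
+ * example as the rt_tbl_hdl of a filtering rule) before being released:
+ *
+ *	struct ipa_ioc_get_rt_tbl lookup;
+ *
+ *	memset(&lookup, 0, sizeof(lookup));
+ *	lookup.ip = IPA_IP_v4;
+ *	strlcpy(lookup.name, "example_tbl", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa_get_rt_tbl(&lookup)) {
+ *		... use lookup.hdl ...
+ *		ipa_put_rt_tbl(lookup.hdl);
+ *	}
+ */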
diff --git a/drivers/platform/msm/ipa/ipa_utils.c b/drivers/platform/msm/ipa/ipa_utils.c
new file mode 100644
index 0000000..d5d5566
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_utils.c
@@ -0,0 +1,1353 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h>	/* gen_pool_alloc() */
+#include <linux/io.h>
+#include "ipa_i.h"
+
+static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+					IPA_OFFSET_MEQ32_1, -1 };
+static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+					IPA_OFFSET_MEQ128_1, -1 };
+static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+					IPA_IHL_OFFSET_RANGE16_1, -1 };
+static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+					IPA_IHL_OFFSET_MEQ32_1, -1 };
+
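+/*
+ * Static endpoint (pipe) map: one row per ipa_operating_mode value, one
+ * column per ipa_client_type value; -1 marks a client with no pipe in that
+ * mode. Consumed via ipa_get_ep_mapping() below.
+ */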
+static const int ep_mapping[IPA_MODE_MAX][IPA_CLIENT_MAX] = {
+	{ -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+	{ -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+	{ 11, 13, 15, 17, 19, -1, -1, 8, 6, 2, 1, 5, 10, 12, 14, 16, 18, -1, 9, 7, 3, 4 },
+	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+};
+
+/**
+ * ipa_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_route(struct ipa_route *route)
+{
+	ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST,
+		     IPA_SETFIELD(route->route_dis,
+				  IPA_ROUTE_ROUTE_DIS_SHFT,
+				  IPA_ROUTE_ROUTE_DIS_BMSK) |
+			IPA_SETFIELD(route->route_def_pipe,
+				     IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+				     IPA_ROUTE_ROUTE_DEF_PIPE_BMSK) |
+			IPA_SETFIELD(route->route_def_hdr_table,
+				     IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+				     IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK) |
+			IPA_SETFIELD(route->route_def_hdr_ofst,
+				     IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+				     IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK));
+
+	return 0;
+}
+
+/**
+ * ipa_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_filter(u32 disable)
+{
+	ipa_write_reg(ipa_ctx->mmio, IPA_FILTER_OFST,
+		     IPA_SETFIELD(!disable,
+				  IPA_FILTER_FILTER_EN_SHFT,
+				  IPA_FILTER_FILTER_EN_BMSK));
+	return 0;
+}
+
+/**
+ * ipa_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_init_hw(void)
+{
+	u32 ipa_version = 0;
+
+	/* do soft reset of IPA */
+	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
+	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);
+
+	/* enable IPA */
+	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);
+
+	/* Read IPA version and make sure we have access to the registers */
+	ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
+	if (ipa_version == 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * ipa_get_ep_mapping() - provide endpoint mapping
+ * @mode: IPA operating mode
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+		enum ipa_client_type client)
+{
+	return ep_mapping[mode][client];
+}
+
+/**
+ * ipa_write_32() - convert 32 bit value to byte array
+ * @w: 32 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte that follows the written value
+ */
+u8 *ipa_write_32(u32 w, u8 *dest)
+{
+	*dest++ = (u8)((w) & 0xFF);
+	*dest++ = (u8)((w >> 8) & 0xFF);
+	*dest++ = (u8)((w >> 16) & 0xFF);
+	*dest++ = (u8)((w >> 24) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_16() - convert 16 bit value to byte array
+ * @hw: 16 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte that follows the written value
+ */
+u8 *ipa_write_16(u16 hw, u8 *dest)
+{
+	*dest++ = (u8)((hw) & 0xFF);
+	*dest++ = (u8)((hw >> 8) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_8() - convert 8 bit value to byte array
+ * @b: 8 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte that follows the written value
+ */
+u8 *ipa_write_8(u8 b, u8 *dest)
+{
+	*dest++ = (b) & 0xFF;
+
+	return dest;
+}
+
+/**
+ * ipa_pad_to_32() - pad byte array to 32 bit value
+ * @dest: byte array
+ *
+ * Return value: pointer to the destination after 32 bit alignment padding
+ */
+u8 *ipa_pad_to_32(u8 *dest)
+{
+	int i = (u32)dest & 0x3;
+	int j;
+
+	if (i)
+		for (j = 0; j < (4 - i); j++)
+			*dest++ = 0;
+
+	return dest;
+}
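+
+/*
+ * The write/pad helpers above are composed when serializing rule equations;
+ * for example, one 32 bit masked-equal check (offset, mask, value) is
+ * emitted as:
+ *
+ *	buf = ipa_write_8(12, buf);		offset within the header
+ *	buf = ipa_write_32(0xFFFFFFFF, buf);	mask
+ *	buf = ipa_write_32(addr, buf);		value to match
+ *	buf = ipa_pad_to_32(buf);		keep the stream word aligned
+ */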
+
+/**
+ * ipa_generate_hw_rule() - generate HW rule
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer
+ * @en_rule: bitmap of rule equations enabled in the generated rule
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+
+	if (ip == IPA_IP_v4) {
+
+		/* error check */
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+		    attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
+		    IPA_FLT_FLOW_LABEL) {
+			IPAERR("v6 attrib's specified for v4 rule\n");
+			return -EPERM;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TOS) {
+			*en_rule |= IPA_TOS_EQ;
+			*buf = ipa_write_8(attrib->u.v4.tos, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			*en_rule |= IPA_PROTOCOL_EQ;
+			*buf = ipa_write_8(attrib->u.v4.protocol, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			/* 12 => offset of src ip in v4 header */
+			*buf = ipa_write_8(12, *buf);
+			*buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
+			*buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			/* 16 => offset of dst ip in v4 header */
+			*buf = ipa_write_8(16, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->src_port_hi < attrib->src_port_lo) {
+				IPAERR("bad src port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port_hi, *buf);
+			*buf = ipa_write_16(attrib->src_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->dst_port_hi < attrib->dst_port_lo) {
+				IPAERR("bad dst port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v4 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
+			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of type after v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->type, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_CODE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 1  => offset of code after v4 header */
+			*buf = ipa_write_8(1, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->code, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SPI) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of SPI after v4 header FIXME */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFFFFFFFF, *buf);
+			*buf = ipa_write_32(attrib->spi, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v4 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+			*en_rule |= IPA_METADATA_COMPARE;
+			*buf = ipa_write_8(0, *buf);    /* offset, reserved */
+			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
+			*buf = ipa_write_32(attrib->meta_data, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+			*en_rule |= IPA_IPV4_IS_FRAG;
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+	} else if (ip == IPA_IP_v6) {
+
+		/* v6 code below assumes no extension headers TODO: fix this */
+
+		/* error check */
+		if (attrib->attrib_mask & IPA_FLT_TOS ||
+		    attrib->attrib_mask & IPA_FLT_PROTOCOL ||
+		    attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+			IPAERR("v4 attrib's specified for v6 rule\n");
+			return -EPERM;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+			*en_rule |= IPA_PROTOCOL_EQ;
+			*buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of type after v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->type, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_CODE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 1  => offset of code after v6 header */
+			*buf = ipa_write_8(1, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->code, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SPI) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of SPI after v6 header FIXME */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFFFFFFFF, *buf);
+			*buf = ipa_write_32(attrib->spi, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v6 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->src_port_hi < attrib->src_port_lo) {
+				IPAERR("bad src port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port_hi, *buf);
+			*buf = ipa_write_16(attrib->src_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->dst_port_hi < attrib->dst_port_lo) {
+				IPAERR("bad dst port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v6 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
+			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			/* 8 => offset of src ip in v6 header */
+			*buf = ipa_write_8(8, *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			/* 24 => offset of dst ip in v6 header */
+			*buf = ipa_write_8(24, *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TC) {
+			*en_rule |= IPA_FLT_TC;
+			*buf = ipa_write_8(attrib->u.v6.tc, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+			*en_rule |= IPA_FLT_FLOW_LABEL;
+			 /* FIXME FL is only 20 bits */
+			*buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+			*en_rule |= IPA_METADATA_COMPARE;
+			*buf = ipa_write_8(0, *buf);    /* offset, reserved */
+			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
+			*buf = ipa_write_32(attrib->meta_data, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+	} else {
+		IPAERR("unsupported ip %d\n", ip);
+		return -EPERM;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		if (ipa_ofst_meq32[ofst_meq32] == -1) {
+			IPAERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= ipa_ofst_meq32[ofst_meq32];
+		*buf = ipa_write_8(0, *buf);    /* offset */
+		*buf = ipa_write_32(0, *buf);   /* mask */
+		*buf = ipa_write_32(0, *buf);   /* val */
+		*buf = ipa_pad_to_32(*buf);
+		ofst_meq32++;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa_cfg_ep - IPA end-point configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a one
+ * shot API to configure the IPA end-point fully
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	int result = -EINVAL;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	result = ipa_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+	if (result)
+		return result;
+
+	result = ipa_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+	if (result)
+		return result;
+
+	if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
+		result = ipa_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+		if (result)
+			return result;
+
+		result = ipa_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+		if (result)
+			return result;
+
+		result = ipa_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+		if (result)
+			return result;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep);
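+
+/*
+ * Illustrative sketch (not part of the driver): a producer client might use
+ * the one-shot API above roughly as follows, assuming clnt_hdl was returned
+ * by an earlier connect call and that an all-zero sub-config selects the
+ * default (disabled) behavior for the fields that are not shown:
+ *
+ *	struct ipa_ep_cfg cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.hdr.hdr_len = 14;
+ *	cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+ *	if (ipa_cfg_ep(clnt_hdl, &cfg))
+ *		IPAERR("failed to configure EP %u\n", clnt_hdl);
+ */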
+
+/**
+ * ipa_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.nat = *ipa_ep_cfg;
+	/* clnt_hdl is used as pipe_index */
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_NAT_n_OFST(clnt_hdl),
+		      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.nat.nat_en,
+				   IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+				   IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_nat);
+
+/**
+ * ipa_cfg_ep_hdr() -  IPA end-point header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+	u32 val;
+	struct ipa_ep_context *ep;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr = *ipa_ep_cfg;
+
+	val = IPA_SETFIELD(ep->cfg.hdr.hdr_len,
+		   IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata_valid,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_additional_const_len,
+		   IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size_valid,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_a5_mux,
+		   IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HDR_n_OFST(clnt_hdl), val);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr);
+
+/**
+ * ipa_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+	u32 val;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.mode = *ipa_ep_cfg;
+	ipa_ctx->ep[clnt_hdl].dst_pipe_index = ipa_get_ep_mapping(ipa_ctx->mode,
+			ipa_ep_cfg->dst);
+
+	val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.mode.mode,
+			   IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+			   IPA_ENDP_INIT_MODE_n_MODE_BMSK) |
+	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].dst_pipe_index,
+			   IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+			   IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_MODE_n_OFST(clnt_hdl), val);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_mode);
+
+/**
+ * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+	u32 val;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.aggr = *ipa_ep_cfg;
+
+	val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_en,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) |
+	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) |
+	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_byte_limit,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) |
+	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_time_limit,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_AGGR_n_OFST(clnt_hdl), val);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_aggr);
+
+/**
+ * ipa_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("ROUTE does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	/*
+	 * if DMA mode was configured previously for this EP, return with
+	 * success
+	 */
+	if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+		IPADBG("DMA mode for EP %d\n", clnt_hdl);
+		return 0;
+	}
+
+	if (ipa_ep_cfg->rt_tbl_hdl)
+		IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+	/* always use the "default" routing tables whose indices are 0 */
+	ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_ROUTE_n_OFST(clnt_hdl),
+		      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].rt_tbl_idx,
+			   IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+			   IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK));
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_route);
+
+/**
+ * ipa_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+	int i;
+	u32 *cur = (u32 *)base;
+	u8 *byt;
+	IPADBG("START phys=%x\n", phy_base);
+	for (i = 0; i < size / 4; i++) {
+		byt = (u8 *)(cur + i);
+		IPADBG("%2d %08x   %02x %02x %02x %02x\n", i, *(cur + i),
+				byt[0], byt[1], byt[2], byt[3]);
+	}
+	IPADBG("END\n");
+}
+
+/**
+ * ipa_dump() - dumps part of driver data structures for debug purposes
+ */
+void ipa_dump(void)
+{
+	struct ipa_mem_buffer hdr_mem = { 0 };
+	struct ipa_mem_buffer rt_mem = { 0 };
+	struct ipa_mem_buffer flt_mem = { 0 };
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (ipa_generate_hdr_hw_tbl(&hdr_mem))
+		IPAERR("fail\n");
+	if (ipa_generate_rt_hw_tbl(IPA_IP_v4, &rt_mem))
+		IPAERR("fail\n");
+	if (ipa_generate_flt_hw_tbl(IPA_IP_v4, &flt_mem))
+		IPAERR("fail\n");
+	IPAERR("PHY hdr=%x rt=%x flt=%x\n", hdr_mem.phys_base, rt_mem.phys_base,
+			flt_mem.phys_base);
+	IPAERR("VIRT hdr=%x rt=%x flt=%x\n", (u32)hdr_mem.base,
+			(u32)rt_mem.base, (u32)flt_mem.base);
+	IPAERR("SIZE hdr=%d rt=%d flt=%d\n", hdr_mem.size, rt_mem.size,
+			flt_mem.size);
+	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+	IPA_DUMP_BUFF(rt_mem.base, rt_mem.phys_base, rt_mem.size);
+	IPA_DUMP_BUFF(flt_mem.base, flt_mem.phys_base, flt_mem.size);
+	if (hdr_mem.phys_base)
+		dma_free_coherent(NULL, hdr_mem.size, hdr_mem.base,
+				hdr_mem.phys_base);
+	if (rt_mem.phys_base)
+		dma_free_coherent(NULL, rt_mem.size, rt_mem.base,
+				rt_mem.phys_base);
+	if (flt_mem.phys_base)
+		dma_free_coherent(NULL, flt_mem.size, flt_mem.base,
+				flt_mem.phys_base);
+	mutex_unlock(&ipa_ctx->lock);
+}
+
+/*
+ * TODO: add swap if needed, for now assume LE is ok for device memory
+ * even though IPA registers are assumed to be BE
+ */
+/**
+ * ipa_write_dev_8() - writes 8 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram)
+{
+	iowrite8(val, (void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram));
+}
+
+/**
+ * ipa_write_dev_16() - writes 16 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ *
+ */
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram)
+{
+	iowrite16(val, (void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram));
+}
+
+/**
+ * ipa_write_dev_32() - writes 32 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram)
+{
+	iowrite32(val, (void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram));
+}
+
+/**
+ * ipa_read_dev_8() - reads 8 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram)
+{
+	return ioread8((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram));
+}
+
+/**
+ * ipa_read_dev_16() - reads 16 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram)
+{
+	return ioread16((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram));
+}
+
+/**
+ * ipa_read_dev_32() - reads 32 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram)
+{
+	return ioread32((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram));
+}
+
+/**
+ * ipa_write_dev_8rep() - writes a buffer of 8 bit values
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer to write from
+ * @count: number of 8 bit values to write
+ */
+void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf, unsigned long count)
+{
+	iowrite8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+			count);
+}
+
+/**
+ * ipa_write_dev_16rep() - writes a buffer of 16 bit values
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer to write from
+ * @count: number of 16 bit values to write
+ */
+void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count)
+{
+	iowrite16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+			buf, count);
+}
+
+/**
+ * ipa_write_dev_32rep() - writes a buffer of 32 bit values
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer to write from
+ * @count: number of 32 bit values to write
+ */
+void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count)
+{
+	iowrite32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+			buf, count);
+}
+
+/**
+ * ipa_read_dev_8rep() - reads a buffer of 8 bit values
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read into
+ * @count: number of 8 bit values to read
+ */
+void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+	ioread8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+			count);
+}
+
+/**
+ * ipa_read_dev_16rep() - reads a buffer of 16 bit values
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read into
+ * @count: number of 16 bit values to read
+ */
+void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+	ioread16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+			count);
+}
+
+/**
+ * ipa_read_dev_32rep() - reads a buffer of 32 bit values
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read into
+ * @count: number of 32 bit values to read
+ */
+void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+	ioread32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+			count);
+}
+
+/**
+ * ipa_memset_dev() - memset IO
+ * @ofst_ipa_sram: address to set
+ * @value: value
+ * @count: number of bytes to set
+ */
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count)
+{
+	memset_io((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), value,
+			count);
+}
+
+/**
+ * ipa_memcpy_from_dev() - copy memory from device
+ * @dest: buffer to copy to
+ * @ofst_ipa_sram: address
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count)
+{
+	memcpy_fromio(dest, (void *)((u32)ipa_ctx->mmio + 0x4000 +
+				ofst_ipa_sram), count);
+}
+
+/**
+ * ipa_memcpy_to_dev() - copy memory to device
+ * @ofst_ipa_sram: address
+ * @source: buffer to copy from
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count)
+{
+	memcpy_toio((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+			source, count);
+}
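+
+/*
+ * Illustrative sketch (not part of the driver): the accessors above address
+ * IPA local memory by SRAM offset rather than by virtual address, e.g. to
+ * clear a small table area and write back one 32 bit entry (the 0x100 offset
+ * and the values are made up for the example):
+ *
+ *	ipa_memset_dev(0x100, 0, 64);
+ *	ipa_write_dev_32(0xdeadbeef, 0x100);
+ *	WARN_ON(ipa_read_dev_32(0x100) != 0xdeadbeef);
+ */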
+
+/**
+ * ipa_defrag() - handle de-frag for bridging type of cases
+ * @skb: skb
+ *
+ * Return value:
+ * 0: success - skb was not a fragment or reassembly is complete
+ * -EINPROGRESS: skb was handed to IP reassembly, which is still in progress
+ */
+int ipa_defrag(struct sk_buff *skb)
+{
+	/*
+	 * Reassemble IP fragments. TODO: need to setup network_header to
+	 * point to start of IP header
+	 */
+	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+		if (ip_defrag(skb, IP_DEFRAG_CONNTRACK_IN))
+			return -EINPROGRESS;
+	}
+
+	/* skb is either not a fragment or is now fully reassembled */
+	return 0;
+}
+
+/**
+ * ipa_search() - search for handle in RB tree
+ * @root: tree root
+ * @hdl: handle
+ *
+ * Return value: tree node corresponding to the handle
+ */
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct ipa_tree_node *data = container_of(node,
+				struct ipa_tree_node, node);
+
+		if (hdl < data->hdl)
+			node = node->rb_left;
+		else if (hdl > data->hdl)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+/**
+ * ipa_insert() - insert new node to RB tree
+ * @root: tree root
+ * @data: new data to insert
+ *
+ * Return value:
+ * 0: success
+ * -EPERM: tree already contains the node with provided handle
+ */
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct ipa_tree_node *this = container_of(*new,
+				struct ipa_tree_node, node);
+
+		parent = *new;
+		if (data->hdl < this->hdl)
+			new = &((*new)->rb_left);
+		else if (data->hdl > this->hdl)
+			new = &((*new)->rb_right);
+		else
+			return -EPERM;
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+
+	return 0;
+}
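+
+/*
+ * Illustrative sketch (not part of the driver): the two helpers above track
+ * opaque handles in a caller-owned rb_root; "my_hdl" below is a stand-in for
+ * a real handle value:
+ *
+ *	struct rb_root root = RB_ROOT;
+ *	struct ipa_tree_node *node;
+ *
+ *	node = kzalloc(sizeof(*node), GFP_KERNEL);
+ *	if (!node)
+ *		return -ENOMEM;
+ *	node->hdl = my_hdl;
+ *	if (ipa_insert(&root, node))
+ *		IPAERR("handle %u already tracked\n", my_hdl);
+ *	if (ipa_search(&root, my_hdl) == NULL)
+ *		IPAERR("handle %u not found\n", my_hdl);
+ */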
+
+/**
+ * ipa_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa_pipe_mem_init(u32 start_ofst, u32 size)
+{
+	int res;
+	u32 aligned_start_ofst;
+	u32 aligned_size;
+	struct gen_pool *pool;
+
+	if (!size) {
+		IPAERR("no IPA pipe mem alloted\n");
+		goto fail;
+	}
+
+	aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
+	aligned_size = size - (aligned_start_ofst - start_ofst);
+
+	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+	       start_ofst, aligned_start_ofst, size, aligned_size);
+
+	/* minimum allocation order of 8, i.e. 256 byte chunks, global pool */
+	pool = gen_pool_create(8, -1);
+	if (!pool) {
+		IPAERR("Failed to create a new memory pool.\n");
+		goto fail;
+	}
+
+	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+	if (res) {
+		IPAERR("Failed to add memory to IPA pipe pool\n");
+		goto err_pool_add;
+	}
+
+	ipa_ctx->pipe_mem_pool = pool;
+	return 0;
+
+err_pool_add:
+	gen_pool_destroy(pool);
+fail:
+	return -ENOMEM;
+}
+
+/**
+ * ipa_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+	u32 vaddr;
+	int res = -1;
+
+	if (!ipa_ctx->pipe_mem_pool || !size) {
+		IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+				ipa_ctx->pipe_mem_pool);
+		return res;
+	}
+
+	vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
+
+	if (vaddr) {
+		*ofst = vaddr;
+		res = 0;
+		IPADBG("size=%u ofst=%u\n", size, vaddr);
+	} else {
+		IPAERR("size=%u failed\n", size);
+	}
+
+	return res;
+}
+
+/**
+ * ipa_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_free(u32 ofst, u32 size)
+{
+	IPADBG("size=%u ofst=%u\n", size, ofst);
+	if (ipa_ctx->pipe_mem_pool && size)
+		gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
+	return 0;
+}
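+
+/*
+ * Illustrative sketch (not part of the driver): the pipe memory pool is
+ * seeded once from the "qcom,ipa-pipe-mem" region and FIFOs are then carved
+ * out of it; the offsets and sizes below are made up for the example:
+ *
+ *	u32 ofst;
+ *
+ *	if (ipa_pipe_mem_init(0, 0x4000))
+ *		IPAERR("no pipe memory, fall back to system memory\n");
+ *	else if (ipa_pipe_mem_alloc(&ofst, 0x800) == 0) {
+ *		... use [ofst, ofst + 0x800) as a data FIFO ...
+ *		ipa_pipe_mem_free(ofst, 0x800);
+ *	}
+ */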
+
+/**
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode:	[in] the desired aggregation mode, e.g. straight MBIM or
+ * QCNCM
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	u32 reg_val;
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST,
+			((mode & IPA_AGGREGATION_MODE_MSK) <<
+				IPA_AGGREGATION_MODE_SHFT) |
+			(reg_val & IPA_AGGREGATION_MODE_BMSK));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_set_aggr_mode);
+
+/**
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+	u32 reg_val;
+
+	if (sig == NULL) {
+		IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
+		return -EINVAL;
+	}
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST, sig[0] <<
+			IPA_AGGREGATION_QCNCM_SIG0_SHFT |
+			(sig[1] << IPA_AGGREGATION_QCNCM_SIG1_SHFT) |
+			sig[2] | (reg_val & IPA_AGGREGATION_QCNCM_SIG_BMSK));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable:	[in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable)
+{
+	u32 reg_val;
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST, (enable &
+			IPA_AGGREGATION_SINGLE_NDP_MSK) |
+			(reg_val & IPA_AGGREGATION_SINGLE_NDP_BMSK));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
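+
+/*
+ * Illustrative sketch (not part of the driver): the three aggregation knobs
+ * above are global and are typically programmed together when QCNCM framing
+ * is used; IPA_QCNCM below is a stand-in for whichever enum ipa_aggr_mode
+ * value selects QCNCM:
+ *
+ *	char sig[3] = {'Q', 'N', 'D'};
+ *
+ *	ipa_set_aggr_mode(IPA_QCNCM);
+ *	ipa_set_qcncm_ndp_sig(sig);
+ *	ipa_set_single_ndp_per_mbim(false);
+ */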
+
+/**
+ * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+	u32 next_start;
+	u32 prev_end;
+
+	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
+	next_start = (start + (boundary - 1)) & ~(boundary - 1);
+	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+	while (next_start < prev_end)
+		next_start += boundary;
+
+	if (next_start == prev_end)
+		return 1;
+	else
+		return 0;
+}
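+
+/*
+ * Worked example for ipa_straddle_boundary() (not part of the driver): with
+ * boundary = 0x100, the buffer [0x110, 0x210] crosses the 0x200 boundary so
+ * the function returns 1, while [0x110, 0x1F0] stays inside [0x100, 0x200)
+ * and the function returns 0.
+ */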
+
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
new file mode 100644
index 0000000..3c7f5ca
--- /dev/null
+++ b/drivers/platform/msm/ipa/rmnet_bridge.c
@@ -0,0 +1,122 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "a2_service.h"
+#include "ipa_i.h"
+
+static struct rmnet_bridge_cb_type {
+	u32 producer_handle;
+	u32 consumer_handle;
+	bool is_connected;
+} rmnet_bridge_cb;
+
+/**
+* rmnet_bridge_init() - Initialize RmNet bridge module
+*
+* Return codes:
+* 0: success
+*/
+int rmnet_bridge_init(void)
+{
+	memset(&rmnet_bridge_cb, 0, sizeof(struct rmnet_bridge_cb_type));
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_bridge_init);
+
+/**
+* rmnet_bridge_disconnect() - Disconnect RmNet bridge module
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_disconnect(void)
+{
+	int ret = 0;
+	if (false == rmnet_bridge_cb.is_connected) {
+		pr_err("%s: trying to disconnect already disconnected RmNet bridge\n",
+		       __func__);
+		goto bail;
+	}
+
+	rmnet_bridge_cb.is_connected = false;
+
+	ret = ipa_bridge_teardown(IPA_DL);
+	ret = ipa_bridge_teardown(IPA_UL);
+bail:
+	return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_disconnect);
+
+/**
+* rmnet_bridge_connect() - Connect RmNet bridge module
+* @producer_hdl:	IPA producer handle
+* @consumer_hdl:	IPA consumer handle
+* @wwan_logical_channel_id:	WWAN logical channel ID
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_connect(u32 producer_hdl,
+			 u32 consumer_hdl,
+			 int wwan_logical_channel_id)
+{
+	int ret = 0;
+
+	if (true == rmnet_bridge_cb.is_connected) {
+		ret = 0;
+		pr_err("%s: trying to connect already connected RmNet bridge\n",
+		       __func__);
+		goto bail;
+	}
+
+	rmnet_bridge_cb.consumer_handle = consumer_hdl;
+	rmnet_bridge_cb.producer_handle = producer_hdl;
+	rmnet_bridge_cb.is_connected = true;
+
+	ret = ipa_bridge_setup(IPA_DL);
+	if (ret) {
+		pr_err("%s: IPA DL bridge setup failure\n", __func__);
+		goto bail_dl;
+	}
+	ret = ipa_bridge_setup(IPA_UL);
+	if (ret) {
+		pr_err("%s: IPA UL bridge setup failure\n", __func__);
+		goto bail_ul;
+	}
+	return 0;
+bail_ul:
+	ipa_bridge_teardown(IPA_DL);
+bail_dl:
+	rmnet_bridge_cb.is_connected = false;
+bail:
+	return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_connect);
+
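+/**
+* rmnet_bridge_get_client_handles() - Retrieve the stored producer and
+* consumer client handles
+* @producer_handle: out parameter, IPA producer handle
+* @consumer_handle: out parameter, IPA consumer handle
+*/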
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+		u32 *consumer_handle)
+{
+	if (producer_handle == NULL || consumer_handle == NULL)
+		return;
+
+	*producer_handle = rmnet_bridge_cb.producer_handle;
+	*consumer_handle = rmnet_bridge_cb.consumer_handle;
+}
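+
+/*
+* Illustrative sketch (not part of the driver): expected call flow for the
+* RmNet bridge, assuming the producer/consumer handles and the WWAN logical
+* channel id come from the caller's own setup:
+*
+*	rmnet_bridge_init();
+*	if (rmnet_bridge_connect(prod_hdl, cons_hdl, wwan_ch_id))
+*		return;
+*	... traffic flows over the UL and DL bridges ...
+*	rmnet_bridge_disconnect();
+*/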
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
new file mode 100644
index 0000000..613cd9f
--- /dev/null
+++ b/include/linux/msm_ipa.h
@@ -0,0 +1,714 @@
+#ifndef _MSM_IPA_H_
+#define _MSM_IPA_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#endif
+#include <linux/ioctl.h>
+
+/**
+ * unique magic number of the IPA device
+ */
+#define IPA_IOC_MAGIC 0xCF
+
+/**
+ * name of the default routing tables for v4 and v6
+ */
+#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
+
+/**
+ *   the commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR            0
+#define IPA_IOCTL_DEL_HDR            1
+#define IPA_IOCTL_ADD_RT_RULE        2
+#define IPA_IOCTL_DEL_RT_RULE        3
+#define IPA_IOCTL_ADD_FLT_RULE       4
+#define IPA_IOCTL_DEL_FLT_RULE       5
+#define IPA_IOCTL_COMMIT_HDR         6
+#define IPA_IOCTL_RESET_HDR          7
+#define IPA_IOCTL_COMMIT_RT          8
+#define IPA_IOCTL_RESET_RT           9
+#define IPA_IOCTL_COMMIT_FLT        10
+#define IPA_IOCTL_RESET_FLT         11
+#define IPA_IOCTL_DUMP              12
+#define IPA_IOCTL_GET_RT_TBL        13
+#define IPA_IOCTL_PUT_RT_TBL        14
+#define IPA_IOCTL_COPY_HDR          15
+#define IPA_IOCTL_QUERY_INTF        16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
+#define IPA_IOCTL_GET_HDR           19
+#define IPA_IOCTL_PUT_HDR           20
+#define IPA_IOCTL_SET_FLT        21
+#define IPA_IOCTL_ALLOC_NAT_MEM  22
+#define IPA_IOCTL_V4_INIT_NAT    23
+#define IPA_IOCTL_NAT_DMA        24
+#define IPA_IOCTL_V4_DEL_NAT     26
+#define IPA_IOCTL_GET_ASYNC_MSG  27
+#define IPA_IOCTL_GET_NAT_OFFSET 28
+#define IPA_IOCTL_MAX            29
+
+/**
+ * max size of the header to be inserted
+ */
+#define IPA_HDR_MAX_SIZE 64
+
+/**
+ * max size of the name of the resource (routing table, header)
+ */
+#define IPA_RESOURCE_NAME_MAX 20
+
+/**
+ * the attributes of the rule (routing or filtering)
+ */
+#define IPA_FLT_TOS            (1ul << 0)
+#define IPA_FLT_PROTOCOL       (1ul << 1)
+#define IPA_FLT_SRC_ADDR       (1ul << 2)
+#define IPA_FLT_DST_ADDR       (1ul << 3)
+#define IPA_FLT_SRC_PORT_RANGE (1ul << 4)
+#define IPA_FLT_DST_PORT_RANGE (1ul << 5)
+#define IPA_FLT_TYPE           (1ul << 6)
+#define IPA_FLT_CODE           (1ul << 7)
+#define IPA_FLT_SPI            (1ul << 8)
+#define IPA_FLT_SRC_PORT       (1ul << 9)
+#define IPA_FLT_DST_PORT       (1ul << 10)
+#define IPA_FLT_TC             (1ul << 11)
+#define IPA_FLT_FLOW_LABEL     (1ul << 12)
+#define IPA_FLT_NEXT_HDR       (1ul << 13)
+#define IPA_FLT_META_DATA      (1ul << 14)
+#define IPA_FLT_FRAGMENT       (1ul << 15)
+
+/**
+ * enum ipa_client_type - names for the various IPA "clients"
+ * these are from the perspective of the clients, e.g.
+ * HSIC1_PROD means HSIC client is the producer and IPA is the
+ * consumer
+ */
+enum ipa_client_type {
+	IPA_CLIENT_PROD,
+	IPA_CLIENT_HSIC1_PROD = IPA_CLIENT_PROD,
+	IPA_CLIENT_HSIC2_PROD,
+	IPA_CLIENT_HSIC3_PROD,
+	IPA_CLIENT_HSIC4_PROD,
+	IPA_CLIENT_HSIC5_PROD,
+	IPA_CLIENT_USB_PROD,
+	IPA_CLIENT_A5_WLAN_AMPDU_PROD,
+	IPA_CLIENT_A2_EMBEDDED_PROD,
+	IPA_CLIENT_A2_TETHERED_PROD,
+	IPA_CLIENT_A5_LAN_WAN_PROD,
+	IPA_CLIENT_A5_CMD_PROD,
+	IPA_CLIENT_Q6_LAN_PROD,
+
+	IPA_CLIENT_CONS,
+	IPA_CLIENT_HSIC1_CONS = IPA_CLIENT_CONS,
+	IPA_CLIENT_HSIC2_CONS,
+	IPA_CLIENT_HSIC3_CONS,
+	IPA_CLIENT_HSIC4_CONS,
+	IPA_CLIENT_HSIC5_CONS,
+	IPA_CLIENT_USB_CONS,
+	IPA_CLIENT_A2_EMBEDDED_CONS,
+	IPA_CLIENT_A2_TETHERED_CONS,
+	IPA_CLIENT_A5_LAN_WAN_CONS,
+	IPA_CLIENT_Q6_LAN_CONS,
+
+	IPA_CLIENT_MAX,
+};
+
+/**
+ * enum ipa_ip_type - Address family: IPv4 or IPv6
+ */
+enum ipa_ip_type {
+	IPA_IP_v4,
+	IPA_IP_v6,
+	IPA_IP_MAX
+};
+
+/**
+ * enum ipa_flt_action - action field of filtering rule
+ *
+ * Pass to routing: 5'd0
+ * Pass to source NAT: 5'd1
+ * Pass to destination NAT: 5'd2
+ * Pass to default output pipe (e.g., A5): 5'd3
+ */
+enum ipa_flt_action {
+	IPA_PASS_TO_ROUTING,
+	IPA_PASS_TO_SRC_NAT,
+	IPA_PASS_TO_DST_NAT,
+	IPA_PASS_TO_EXCEPTION
+};
+
+/**
+ * struct ipa_rule_attrib - attributes of a routing/filtering
+ * rule, all in LE
+ * @attrib_mask: what attributes are valid
+ * @src_port_lo: low port of src port range
+ * @src_port_hi: high port of src port range
+ * @dst_port_lo: low port of dst port range
+ * @dst_port_hi: high port of dst port range
+ * @type: ICMP/IGMP type
+ * @code: ICMP/IGMP code
+ * @spi: IPSec SPI
+ * @src_port: exact src port
+ * @dst_port: exact dst port
+ * @meta_data: meta-data val
+ * @meta_data_mask: meta-data mask
+ * @u.v4.tos: type of service
+ * @u.v4.protocol: protocol
+ * @u.v4.src_addr: src address value
+ * @u.v4.src_addr_mask: src address mask
+ * @u.v4.dst_addr: dst address value
+ * @u.v4.dst_addr_mask: dst address mask
+ * @u.v6.tc: traffic class
+ * @u.v6.flow_label: flow label
+ * @u.v6.next_hdr: next header
+ * @u.v6.src_addr: src address val
+ * @u.v6.src_addr_mask: src address mask
+ * @u.v6.dst_addr: dst address val
+ * @u.v6.dst_addr_mask: dst address mask
+ */
+struct ipa_rule_attrib {
+	uint32_t attrib_mask;
+	uint16_t src_port_lo;
+	uint16_t src_port_hi;
+	uint16_t dst_port_lo;
+	uint16_t dst_port_hi;
+	uint8_t type;
+	uint8_t code;
+	uint32_t spi;
+	uint16_t src_port;
+	uint16_t dst_port;
+	uint32_t meta_data;
+	uint32_t meta_data_mask;
+	union {
+		struct {
+			uint8_t tos;
+			uint8_t protocol;
+			uint32_t src_addr;
+			uint32_t src_addr_mask;
+			uint32_t dst_addr;
+			uint32_t dst_addr_mask;
+		} v4;
+		struct {
+			uint8_t tc;
+			uint32_t flow_label;
+			uint8_t next_hdr;
+			uint32_t src_addr[4];
+			uint32_t src_addr_mask[4];
+			uint32_t dst_addr[4];
+			uint32_t dst_addr_mask[4];
+		} v6;
+	} u;
+};
+
+/**
+ * struct ipa_flt_rule - attributes of a filtering rule
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ */
+struct ipa_flt_rule {
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_hdl;
+	struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_rt_rule - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+ *	it is not an index or an offset
+ * @attrib: attributes of the rule
+ */
+struct ipa_rt_rule {
+	enum ipa_client_type dst;
+	uint32_t hdr_hdl;
+	struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_hdr_add - header descriptor includes in and out
+ * parameters
+ * @name: name of the header
+ * @hdr: actual header to be inserted
+ * @hdr_len: size of above header
+ * @is_partial: header not fully specified
+ * @hdr_hdl: out parameter, handle to header, valid when status is 0
+ * @status:	out parameter, status of header add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_add {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	uint8_t is_partial;
+	uint32_t hdr_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_hdr - header addition parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be written to IPA HW also?
+ * @num_hdrs: num of headers that follow
+ * @ipa_hdr_add hdr:	all headers need to go here back to
+ *			back, no pointers
+ */
+struct ipa_ioc_add_hdr {
+	uint8_t commit;
+	uint8_t num_hdrs;
+	struct ipa_hdr_add hdr[0];
+};
+
+/**
+ * struct ipa_ioc_copy_hdr - retrieve a copy of the specified
+ * header - caller can then derive the complete header
+ * @name: name of the header resource
+ * @hdr:	out parameter, contents of specified header,
+ *	valid only when ioctl return val is non-negative
+ * @hdr_len: out parameter, size of above header
+ *	valid only when ioctl return val is non-negative
+ * @is_partial:	out parameter, indicates whether specified header is partial
+ *		valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_copy_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	uint8_t is_partial;
+};
+
+/**
+ * struct ipa_ioc_get_hdr - header entry lookup parameters, if lookup was
+ * successful caller must call put to release the reference count when done
+ * @name: name of the header resource
+ * @hdl:	out parameter, handle of header entry
+ *		valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_get_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_hdr_del - header descriptor includes in and out
+ * parameters
+ *
+ * @hdl: handle returned from header add operation
+ * @status:	out parameter, status of header remove operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr - header deletion parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be removed from IPA HW also?
+ * @num_hdls: num of headers being removed
+ * @ipa_hdr_del hdl: all handles need to go here back to back, no pointers
+ */
+struct ipa_ioc_del_hdr {
+	uint8_t commit;
+	uint8_t num_hdls;
+	struct ipa_hdr_del hdl[0];
+};
+
+/**
+ * struct ipa_rt_rule_add - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_add {
+	struct ipa_rt_rule rule;
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	struct ipa_rt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_rt_rule_del - routing rule descriptor includes in
+ * and out parameters
+ * @hdl: handle returned from route rule add operation
+ * @status:	output parameter, status of route rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @ipa_rt_rule_del hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	struct ipa_rt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_flt_rule_add - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of filtering rule add   operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_add {
+	struct ipa_flt_rule rule;
+	uint8_t at_rear;
+	uint32_t flt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
+ * multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "client's" pipe does this rule apply to?
+ *	valid only when global is 0
+ * @global: does this apply to global filter table of specific IP family
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t global;
+	uint8_t num_rules;
+	struct ipa_flt_rule_add rules[0];
+};
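+
+/*
+ * Illustrative sketch (not part of the header): user-space adds one filtering
+ * rule by allocating the struct plus a single ipa_flt_rule_add and issuing
+ * IPA_IOC_ADD_FLT_RULE (defined below); "fd" is assumed to be an open
+ * descriptor on the IPA device node:
+ *
+ *	struct ipa_ioc_add_flt_rule *req;
+ *
+ *	req = calloc(1, sizeof(*req) + sizeof(struct ipa_flt_rule_add));
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	req->ep = IPA_CLIENT_USB_PROD;
+ *	req->num_rules = 1;
+ *	req->rules[0].rule.action = IPA_PASS_TO_EXCEPTION;
+ *	req->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+ *	req->rules[0].rule.attrib.dst_port = 5001;
+ *	if (ioctl(fd, IPA_IOC_ADD_FLT_RULE, req) < 0)
+ *		perror("IPA_IOC_ADD_FLT_RULE");
+ *	free(req);
+ */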
+
+/**
+ * struct ipa_flt_rule_del - filtering rule descriptor includes
+ * in and out parameters
+ *
+ * @hdl: handle returned from filtering rule add operation
+ * @status:	output parameter, status of filtering rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_flt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	struct ipa_flt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl - routing table lookup parameters, if lookup was
+ * successful caller must call put to release the reference
+ * count when done
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @hdl:	output parameter, handle of routing table, valid only when ioctl
+ *		return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl {
+	enum ipa_ip_type ip;
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_ioc_query_intf - used to lookup number of tx and
+ * rx properties of interface
+ * @name: name of interface
+ * @num_tx_props:	output parameter, number of tx properties
+ *			valid only when ioctl return val is non-negative
+ * @num_rx_props:	output parameter, number of rx properties
+ *			valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_query_intf {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_tx_props;
+	uint32_t num_rx_props;
+};
+
+/**
+ * struct ipa_ioc_tx_intf_prop - interface tx property
+ * @ip: IP family of routing rule
+ * @attrib: routing rule
+ * @dst_pipe: routing output pipe
+ * @hdr_name: name of associated header if any, empty string when no header
+ */
+struct ipa_ioc_tx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type dst_pipe;
+	char hdr_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
+ * struct ipa_ioc_query_intf_tx_props - interface tx properties
+ * @name: name of interface
+ * @tx: output parameter, the tx properties go here back to back
+ */
+struct ipa_ioc_query_intf_tx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	struct ipa_ioc_tx_intf_prop tx[0];
+};
+
+/**
+ * struct ipa_ioc_rx_intf_prop - interface rx property
+ * @ip: IP family of filtering rule
+ * @attrib: filtering rule
+ * @src_pipe: input pipe
+ */
+struct ipa_ioc_rx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type src_pipe;
+};
+
+/**
+ * struct ipa_ioc_query_intf_rx_props - interface rx properties
+ * @name: name of interface
+ * @rx: output parameter, the rx properties go here back to back
+ */
+struct ipa_ioc_query_intf_rx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	struct ipa_ioc_rx_intf_prop rx[0];
+};
+
+/**
+ * struct ipa_ioc_nat_alloc_mem - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	size_t size;
+	off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization
+ * parameters
+ * @tbl_index: input parameter, index of the table
+ * @ipv4_rules_offset: input parameter, ipv4 rules address offset
+ * @expn_rules_offset: input parameter, ipv4 expansion rules address offset
+ * @index_offset: input parameter, index rules offset
+ * @index_expn_offset: input parameter, index expansion rules offset
+ * @table_entries: input parameter, ipv4 rules table size in entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table size
+ * @ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_init {
+	uint8_t tbl_index;
+	uint32_t ipv4_rules_offset;
+	uint32_t expn_rules_offset;
+
+	uint32_t index_offset;
+	uint32_t index_expn_offset;
+
+	uint16_t table_entries;
+	uint16_t expn_table_entries;
+	uint32_t ip_addr;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_del - nat table delete parameter
+ * @table_index: input parameter, index of the table
+ * @public_ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_del {
+	uint8_t table_index;
+	uint32_t public_ip_addr;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat dma command parameter
+ * @table_index: input parameter, index of the table
+ * @base_addr:	type of table, from which the base address of the table
+ *		can be inferred
+ * @offset: destination offset within the NAT table
+ * @data: data to be written.
+ */
+struct ipa_ioc_nat_dma_one {
+	uint8_t table_index;
+	uint8_t base_addr;
+
+	uint32_t offset;
+	uint16_t data;
+
+};
+
+/**
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands
+ * @entries: number of dma commands in use
+ * @dma: data pointer to the dma commands
+ */
+struct ipa_ioc_nat_dma_cmd {
+	uint8_t entries;
+	struct ipa_ioc_nat_dma_one dma[0];
+
+};
+
+/**
+ * struct ipa_msg_meta - Format of the message meta-data.
+ * @msg_type: the type of the message
+ * @msg_len: the length of the message in bytes
+ * @rsvd: reserved bits for future use.
+ *
+ * Client in user-space should issue a read on the device (/dev/ipa) with a
+ * buffer of at least this size in a continuous loop; the call will block when there
+ * is no pending async message.
+ *
+ * After reading a message's meta-data using the above scheme, the client should
+ * issue a GET_MSG IOCTL to read the message itself into a buffer of "msg_len"
+ * bytes immediately following the ipa_msg_meta itself in the IOCTL payload.
+ */
+struct ipa_msg_meta {
+	uint8_t msg_type;
+	uint16_t msg_len;
+	uint8_t rsvd;
+};
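+
+/*
+ * Illustrative sketch (not part of the header): a user-space listener loop
+ * following the scheme described above, assuming "fd" is an open descriptor
+ * on the IPA device node, IPA_IOC_GET_ASYNC_MSG (defined below) is the
+ * GET_MSG IOCTL referred to, and handle_msg()/MAX_MSG_LEN are caller-supplied:
+ *
+ *	char buf[sizeof(struct ipa_msg_meta) + MAX_MSG_LEN];
+ *	struct ipa_msg_meta *meta = (struct ipa_msg_meta *)buf;
+ *
+ *	while (read(fd, meta, sizeof(*meta)) == sizeof(*meta)) {
+ *		if (ioctl(fd, IPA_IOC_GET_ASYNC_MSG, buf) < 0)
+ *			break;
+ *		handle_msg(meta->msg_type, buf + sizeof(*meta), meta->msg_len);
+ *	}
+ */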
+
+/**
+ *   actual IOCTLs supported by IPA driver
+ */
+#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					struct ipa_ioc_add_hdr *)
+#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					struct ipa_ioc_del_hdr *)
+#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					struct ipa_ioc_del_rt_rule *)
+#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					struct ipa_ioc_del_flt_rule *)
+#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_COMMIT_HDR)
+#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_RESET_HDR)
+#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_RESET_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_FLT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_RESET_FLT, \
+			enum ipa_ip_type)
+#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \
+			IPA_IOCTL_DUMP)
+#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				struct ipa_ioc_get_rt_tbl *)
+#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_RT_TBL, \
+				uint32_t)
+#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				struct ipa_ioc_copy_hdr *)
+#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				struct ipa_ioc_query_intf *)
+#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				struct ipa_ioc_query_intf_tx_props *)
+#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					struct ipa_ioc_query_intf_rx_props *)
+#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				struct ipa_ioc_get_hdr *)
+#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_HDR, \
+				uint32_t)
+#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_DMA, \
+				struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				uint32_t *)
+#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_SET_FLT, \
+			uint32_t)
+#define IPA_IOC_GET_ASYNC_MSG _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_ASYNC_MSG, \
+				struct ipa_msg_meta *)
+
+#endif /* _MSM_IPA_H_ */