msm: ipa: initial commit of IPA driver
This is a snapshot of IPA from kernel msm-4.4 based on
commit ebc2a18351d4 ("msm: ipa: WA to get PA of sgt_tbl from wlan")
CRs-Fixed: 1077422
Change-Id: I97cf9ee9c104ac5ab5bc0577eb9413264b08a7a5
Signed-off-by: Amir Levy <alevy@codeaurora.org>
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
new file mode 100644
index 0000000..8c0b9c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -0,0 +1,207 @@
+Qualcomm Technologies, Inc. Internet Packet Accelerator
+
+Internet Packet Accelerator (IPA) is a programmable protocol
+processor HW block. It is designed to support generic HW processing
+of UL/DL IP packets for various use cases independent of radio technology.
+
+Required properties:
+
+IPA node:
+
+- compatible : "qcom,ipa"
+- reg: Specifies the base physical addresses and the sizes of the IPA
+ registers.
+- reg-names: "ipa-base" - string to identify the IPA CORE base registers.
+ "bam-base" - string to identify the IPA BAM base registers.
+ "a2-bam-base" - string to identify the A2 BAM base registers.
+- interrupts: Specifies the interrupt associated with IPA.
+- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
+ "bam-irq" - string to identify the IPA BAM interrupt.
+ "a2-bam-irq" - string to identify the A2 BAM interrupt.
+- qcom,ipa-hw-ver: Specifies the IPA hardware version.
+
+Optional:
+
+- qcom,wan-rx-ring-size: size of WAN rx ring, default is 32
+- qcom,arm-smmu: SMMU is present and ARM SMMU driver is used
+- qcom,msm-smmu: SMMU is present and QSMMU driver is used
+- qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
+- qcom,smmu-fast-map: Boolean context flag to set SMMU to fastpath mode
+- ipa_smmu_ap: AP general purpose SMMU device
+ compatible "qcom,ipa-smmu-ap-cb"
+- ipa_smmu_wlan: WDI SMMU device
+ compatible "qcom,ipa-smmu-wlan-cb"
+- ipa_smmu_uc: uc SMMU device
+ compatible "qcom,ipa-smmu-uc-cb"
+- qcom,smmu-disable-htw: boolean value to turn off SMMU page table caching
+- qcom,use-a2-service: determine whether the A2 service will be used
+- qcom,use-ipa-tethering-bridge: determine whether the tethering bridge will be used
+- qcom,use-ipa-bamdma-a2-bridge: determine whether the A2/IPA HW bridge will be used
+- qcom,ee: the EE (execution environment) assigned to the (non-secure) APPS
+from the IPA-BAM point of view. This is a number
+- qcom,ipa-hw-mode: IPA hardware mode - Normal, Virtual memory allocation,
+memory allocation over a PCIe bridge
+- qcom,msm-bus,name: String representing the client-name
+- qcom,msm-bus,num-cases: Total number of usecases
+- qcom,msm-bus,active-only: Boolean context flag for requests in active or
+			dual (active & sleep) context
+- qcom,msm-bus,num-paths: Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
+ master-id, slave-id, arbitrated bandwidth
+ in KBps, instantaneous bandwidth in KBps
+- qcom,ipa-bam-remote-mode: Boolean context flag to determine if ipa bam
+ is in remote mode.
+- qcom,modem-cfg-emb-pipe-flt: Boolean context flag to determine if modem
+ configures embedded pipe filtering rules
+- qcom,skip-uc-pipe-reset: Boolean context flag to indicate whether
+ a pipe reset via the IPA uC is required
+- qcom,ipa-wdi2: Boolean context flag to indicate whether
+			WDI 2.0 is used
+- qcom,use-dma-zone: Boolean context flag to indicate whether memory
+			allocations controlled by the IPA driver that do not
+			specify a struct device * should use GFP_DMA to
+			work around IPA HW limitations
+- qcom,use-gsi: Boolean context flag to indicate if the
+ transport protocol is GSI
+- qcom,use-rg10-limitation-mitigation: Boolean context flag to activate
+ the mitigation to register group 10
+ AP access limitation
+- qcom,do-not-use-ch-gsi-20: Boolean context flag to activate
+ software workaround for IPA limitation
+ to not use GSI physical channel 20
+- qcom,tethered-flow-control: Boolean context flag to indicate whether
+ apps based flow control is needed for tethered
+ call.
+IPA pipe sub nodes (A2 static pipes configurations):
+
+-label: two labels are supported, a2-to-ipa and ipa-to-a2, which
+supply the static configuration for the A2-IPA connection.
+-qcom,src-bam-physical-address: The physical address of the source BAM
+-qcom,ipa-bam-mem-type: The memory type:
+ 0(Pipe memory), 1(Private memory), 2(System memory)
+-qcom,src-bam-pipe-index: Source pipe index
+-qcom,dst-bam-physical-address: The physical address of the
+ destination BAM
+-qcom,dst-bam-pipe-index: Destination pipe index
+-qcom,data-fifo-offset: Data fifo base offset
+-qcom,data-fifo-size: Data fifo size (bytes)
+-qcom,descriptor-fifo-offset: Descriptor fifo base offset
+-qcom,descriptor-fifo-size: Descriptor fifo size (bytes)
+
+Optional properties:
+-qcom,ipa-pipe-mem: Specifies the base physical address and the
+ size of the IPA pipe memory region.
+		Pipe memory is a feature that may be supported by the
+		target (HW platform). The driver supports using pipe
+		memory instead of system memory. If this property is
+		not present in the IPA DTS entry, the driver will
+		use system memory.
+- clocks: This property shall provide a list of entries, each of which
+	contains a phandle to a clock controller device and a macro that is
+	the clock's name in hardware. This should be "clock_rpm" as the clock
+	controller phandle and "clk_ipa_clk" as the macro for "iface_clk"
+- clock-names: This property shall contain the clock input names used
+	by the driver, in the same order as the clocks property ("iface_clk")
+
+IPA SMMU sub nodes
+
+-compatible: "qcom,ipa-smmu-ap-cb" - represents the AP context bank.
+
+-compatible: "qcom,ipa-smmu-wlan-cb" - represents IPA WLAN context bank.
+
+-compatible: "qcom,ipa-smmu-uc-cb" - represents IPA uC context bank (for uC
+ offload scenarios).
+- iommus: the phandle and stream IDs for the SMMU used by this root
+
+- qcom,iova-mapping: specifies the start address and size of iova space.
+
+IPA SMP2P sub nodes
+
+-compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from
+ ipa driver to modem.
+
+-compatible: "qcom,smp2pgpio-map-ipa-1-in" - represents the in gpio to
+ ipa driver from modem.
+
+-gpios: Binding to the gpio defined in XXX-smp2p.dtsi
+
+
+Example:
+
+qcom,ipa@fd4c0000 {
+ compatible = "qcom,ipa";
+	reg = <0xfd4c0000 0x26000>,
+	      <0xfd4c4000 0x14818>,
+	      <0xfc834000 0x7000>;
+	reg-names = "ipa-base", "bam-base", "a2-bam-base";
+	interrupts = <0 252 0>,
+	             <0 253 0>,
+	             <0 29 1>;
+	interrupt-names = "ipa-irq", "bam-irq", "a2-bam-irq";
+ qcom,ipa-hw-ver = <1>;
+ clocks = <&clock_rpm clk_ipa_clk>;
+ clock-names = "iface_clk";
+
+ qcom,msm-bus,name = "ipa";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <90 512 0 0>, <90 585 0 0>, /* No vote */
+ <90 512 100000 800000>, <90 585 100000 800000>, /* SVS */
+ <90 512 100000 1200000>, <90 585 100000 1200000>; /* PERF */
+ qcom,bus-vector-names = "MIN", "SVS", "PERF";
+
+ qcom,pipe1 {
+ label = "a2-to-ipa";
+ qcom,src-bam-physical-address = <0xfc834000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <1>;
+ qcom,dst-bam-physical-address = <0xfd4c0000>;
+ qcom,dst-bam-pipe-index = <6>;
+ qcom,data-fifo-offset = <0x1000>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0x1d00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+
+ qcom,pipe2 {
+ label = "ipa-to-a2";
+ qcom,src-bam-physical-address = <0xfd4c0000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <7>;
+ qcom,dst-bam-physical-address = <0xfc834000>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-offset = <0x00>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0xd00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+
+ /* smp2p gpio information */
+ qcom,smp2pgpio_map_ipa_1_out {
+ compatible = "qcom,smp2pgpio-map-ipa-1-out";
+ gpios = <&smp2pgpio_ipa_1_out 0 0>;
+ };
+
+ qcom,smp2pgpio_map_ipa_1_in {
+ compatible = "qcom,smp2pgpio-map-ipa-1-in";
+ gpios = <&smp2pgpio_ipa_1_in 0 0>;
+ };
+
+ ipa_smmu_ap: ipa_smmu_ap {
+ compatible = "qcom,ipa-smmu-ap-cb";
+ iommus = <&anoc2_smmu 0x30>;
+ qcom,iova-mapping = <0x10000000 0x40000000>;
+ };
+
+ ipa_smmu_wlan: ipa_smmu_wlan {
+ compatible = "qcom,ipa-smmu-wlan-cb";
+ iommus = <&anoc2_smmu 0x31>;
+ };
+
+ ipa_smmu_uc: ipa_smmu_uc {
+ compatible = "qcom,ipa-smmu-uc-cb";
+ iommus = <&anoc2_smmu 0x32>;
+ qcom,iova-mapping = <0x40000000 0x20000000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
new file mode 100644
index 0000000..c7024e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
@@ -0,0 +1,18 @@
+* Qualcomm Technologies, Inc. RmNet IPA driver module
+
+This module enables embedded data calls using IPA HW.
+
+Required properties:
+- compatible: Must be "qcom,rmnet-ipa"
+
+Optional:
+- qcom,rmnet-ipa-ssr: determine if modem SSR is supported
+- qcom,ipa-loaduC: indicate that ipa uC should be loaded
+- qcom,ipa-advertise-sg-support: determine how to respond to a query
+regarding scatter-gather capability
+
+Example:
+ qcom,rmnet-ipa {
+ compatible = "qcom,rmnet-ipa";
+	};
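+
+Example with the optional flags (illustrative; these are boolean properties,
+so their presence alone enables them):
+	qcom,rmnet-ipa {
+		compatible = "qcom,rmnet-ipa";
+		qcom,rmnet-ipa-ssr;
+		qcom,ipa-loaduC;
+	};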
+
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
new file mode 100644
index 0000000..3f55312
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
@@ -0,0 +1,18 @@
+* Qualcomm Technologies, Inc. RmNet IPA driver module
+
+This module enables embedded data calls using IPA v3 HW.
+
+Required properties:
+- compatible: Must be "qcom,rmnet-ipa3"
+
+Optional:
+- qcom,rmnet-ipa-ssr: determine if modem SSR is supported
+- qcom,ipa-loaduC: indicate that ipa uC should be loaded
+- qcom,ipa-advertise-sg-support: determine how to respond to a query
+regarding scatter-gather capability
+
+Example:
+ qcom,rmnet-ipa3 {
+ compatible = "qcom,rmnet-ipa3";
+	};
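+
+Example with the optional flags (illustrative; these are boolean properties,
+so their presence alone enables them):
+	qcom,rmnet-ipa3 {
+		compatible = "qcom,rmnet-ipa3";
+		qcom,rmnet-ipa-ssr;
+		qcom,ipa-loaduC;
+	};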
+
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index c11db8b..e29f6c2 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -8,3 +8,5 @@
source "drivers/platform/goldfish/Kconfig"
source "drivers/platform/chrome/Kconfig"
+
+source "drivers/platform/msm/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index ca26925..3fef6b2 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_OLPC) += olpc/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
+obj-$(CONFIG_ARCH_QCOM) += msm/
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
new file mode 100644
index 0000000..ac9545e
--- /dev/null
+++ b/drivers/platform/msm/Kconfig
@@ -0,0 +1,68 @@
+menu "Qualcomm technologies inc. MSM specific device drivers"
+ depends on ARCH_QCOM
+
+config IPA
+ tristate "IPA support"
+ depends on SPS && NET
+ help
+ This driver supports the Internet Packet Accelerator (IPA) core.
+ IPA is a programmable protocol processor HW block.
+ It is designed to support generic HW processing of UL/DL IP packets
+ for various use cases independent of radio technology.
+	  The driver supports client connection and configuration
+	  for the IPA core.
+	  Kernel and user-space processes can call the IPA driver
+	  to configure the IPA core.
+
+config RMNET_IPA
+ tristate "IPA RMNET WWAN Network Device"
+ depends on IPA && MSM_QMI_INTERFACE
+ help
+	  This WWAN network driver implements a network stack class device.
+	  It supports embedded data transfer from A7 to Q6. It configures the
+	  IPA HW for the RmNet Data Driver and also exchanges QMI messages
+	  between the A7 and Q6 IPA drivers.
+
+config GSI
+ bool "GSI support"
+ help
+ This driver provides the transport needed to talk to the
+ IPA core. It replaces the BAM transport used previously.
+
+	  The GSI connects to a peripheral component via a uniform TLV
+ interface, and allows it to interface with other peripherals
+ and CPUs over various types of interfaces such as MHI, xDCI,
+ xHCI, GPI, WDI, Ethernet, etc.
+
+config IPA3
+ tristate "IPA3 support"
+ depends on GSI && NET
+ help
+ This driver supports the Internet Packet Accelerator (IPA3) core.
+ IPA is a programmable protocol processor HW block.
+ It is designed to support generic HW processing of UL/DL IP packets
+ for various use cases independent of radio technology.
+	  The driver supports client connection and configuration
+	  for the IPA core.
+	  Kernel and user-space processes can call the IPA driver
+	  to configure the IPA core.
+
+config RMNET_IPA3
+ tristate "IPA3 RMNET WWAN Network Device"
+ depends on IPA3 && MSM_QMI_INTERFACE
+ help
+	  This WWAN network driver implements a network stack class device.
+	  It supports embedded data transfer from A7 to Q6. It configures the
+	  IPA HW for the RmNet Data Driver and also exchanges QMI messages
+	  between the A7 and Q6 IPA drivers.
+
+config IPA_UT
+ tristate "IPA Unit-Test Framework and Test Suites"
+ depends on IPA3 && DEBUG_FS
+ help
+	  This module implements the IPA in-kernel test framework.
+	  The framework supports defining and running tests, grouped
+	  into suites according to the sub-unit of the IPA being tested.
+	  The user interface to run and control the tests is the debugfs
+	  file system.
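+
+# Illustrative defconfig fragment (hypothetical): enabling the IPA3 stack
+# together with the GSI transport it depends on, e.g.
+#	CONFIG_GSI=y
+#	CONFIG_IPA3=y
+#	CONFIG_RMNET_IPA3=y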
+endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
new file mode 100644
index 0000000..1f9e11b
--- /dev/null
+++ b/drivers/platform/msm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the MSM specific device drivers.
+#
+obj-$(CONFIG_GSI) += gsi/
+obj-$(CONFIG_IPA) += ipa/
+obj-$(CONFIG_IPA3) += ipa/
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
new file mode 100644
index 0000000..15ed471
--- /dev/null
+++ b/drivers/platform/msm/ipa/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common.o
+obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common.o
+obj-$(CONFIG_IPA_UT) += test/
+
+ipa_common-y := ipa_api.o ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
new file mode 100644
index 0000000..8010561
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -0,0 +1,2931 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/ipa_uc_offload.h>
+#include "ipa_api.h"
+
+#define DRV_NAME "ipa"
+
+#define IPA_API_DISPATCH_RETURN(api, p...) \
+ do { \
+ if (!ipa_api_ctrl) { \
+ pr_err("IPA HW is not supported on this target\n"); \
+ ret = -EPERM; \
+		} else { \
+ if (ipa_api_ctrl->api) { \
+ ret = ipa_api_ctrl->api(p); \
+ } else { \
+ pr_err("%s not implemented for IPA ver %d\n", \
+ __func__, ipa_api_hw_type); \
+ WARN_ON(1); \
+ ret = -EPERM; \
+ } \
+ } \
+ } while (0)
+
+#define IPA_API_DISPATCH(api, p...) \
+ do { \
+		if (!ipa_api_ctrl) { \
+			pr_err("IPA HW is not supported on this target\n"); \
+		} else { \
+ if (ipa_api_ctrl->api) { \
+ ipa_api_ctrl->api(p); \
+ } else { \
+ pr_err("%s not implemented for IPA ver %d\n", \
+ __func__, ipa_api_hw_type); \
+ WARN_ON(1); \
+ } \
+ } \
+ } while (0)
+
+#define IPA_API_DISPATCH_RETURN_PTR(api, p...) \
+ do { \
+ if (!ipa_api_ctrl) { \
+ pr_err("IPA HW is not supported on this target\n"); \
+ ret = NULL; \
+		} else { \
+ if (ipa_api_ctrl->api) { \
+ ret = ipa_api_ctrl->api(p); \
+ } else { \
+ pr_err("%s not implemented for IPA ver %d\n", \
+ __func__, ipa_api_hw_type); \
+ WARN_ON(1); \
+ ret = NULL; \
+ } \
+ } \
+ } while (0)
+
+#define IPA_API_DISPATCH_RETURN_BOOL(api, p...) \
+ do { \
+ if (!ipa_api_ctrl) { \
+ pr_err("IPA HW is not supported on this target\n"); \
+ ret = false; \
+		} else { \
+ if (ipa_api_ctrl->api) { \
+ ret = ipa_api_ctrl->api(p); \
+ } else { \
+ pr_err("%s not implemented for IPA ver %d\n", \
+ __func__, ipa_api_hw_type); \
+ WARN_ON(1); \
+ ret = false; \
+ } \
+ } \
+ } while (0)
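+
+/*
+ * Illustrative expansion (for orientation, not generated code): a wrapper
+ * such as ipa_commit_hdr() below boils down to
+ *
+ *	if (ipa_api_ctrl && ipa_api_ctrl->ipa_commit_hdr)
+ *		ret = ipa_api_ctrl->ipa_commit_hdr();
+ *
+ * i.e. every exported ipa_* symbol dispatches to the implementation
+ * registered in ipa_api_ctrl for the detected HW version (ipa_api_hw_type),
+ * and fails with -EPERM when no implementation is registered.
+ */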
+
+static enum ipa_hw_type ipa_api_hw_type;
+static struct ipa_api_controller *ipa_api_ctrl;
+
+const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
+ __stringify(IPA_CLIENT_HSIC1_PROD),
+ __stringify(IPA_CLIENT_WLAN1_PROD),
+ __stringify(IPA_CLIENT_HSIC2_PROD),
+ __stringify(IPA_CLIENT_USB2_PROD),
+ __stringify(IPA_CLIENT_HSIC3_PROD),
+ __stringify(IPA_CLIENT_USB3_PROD),
+ __stringify(IPA_CLIENT_HSIC4_PROD),
+ __stringify(IPA_CLIENT_USB4_PROD),
+ __stringify(IPA_CLIENT_HSIC5_PROD),
+ __stringify(IPA_CLIENT_USB_PROD),
+ __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD),
+ __stringify(IPA_CLIENT_A2_EMBEDDED_PROD),
+ __stringify(IPA_CLIENT_A2_TETHERED_PROD),
+ __stringify(IPA_CLIENT_APPS_LAN_WAN_PROD),
+ __stringify(IPA_CLIENT_APPS_CMD_PROD),
+ __stringify(IPA_CLIENT_ODU_PROD),
+ __stringify(IPA_CLIENT_MHI_PROD),
+ __stringify(IPA_CLIENT_Q6_LAN_PROD),
+ __stringify(IPA_CLIENT_Q6_WAN_PROD),
+ __stringify(IPA_CLIENT_Q6_CMD_PROD),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD),
+ __stringify(IPA_CLIENT_Q6_DECOMP_PROD),
+ __stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
+ __stringify(IPA_CLIENT_UC_USB_PROD),
+
+	/* The PROD client types below are only for test purposes */
+ __stringify(IPA_CLIENT_TEST_PROD),
+ __stringify(IPA_CLIENT_TEST1_PROD),
+ __stringify(IPA_CLIENT_TEST2_PROD),
+ __stringify(IPA_CLIENT_TEST3_PROD),
+ __stringify(IPA_CLIENT_TEST4_PROD),
+
+ __stringify(IPA_CLIENT_HSIC1_CONS),
+ __stringify(IPA_CLIENT_WLAN1_CONS),
+ __stringify(IPA_CLIENT_HSIC2_CONS),
+ __stringify(IPA_CLIENT_USB2_CONS),
+ __stringify(IPA_CLIENT_WLAN2_CONS),
+ __stringify(IPA_CLIENT_HSIC3_CONS),
+ __stringify(IPA_CLIENT_USB3_CONS),
+ __stringify(IPA_CLIENT_WLAN3_CONS),
+ __stringify(IPA_CLIENT_HSIC4_CONS),
+ __stringify(IPA_CLIENT_USB4_CONS),
+ __stringify(IPA_CLIENT_WLAN4_CONS),
+ __stringify(IPA_CLIENT_HSIC5_CONS),
+ __stringify(IPA_CLIENT_USB_CONS),
+ __stringify(IPA_CLIENT_USB_DPL_CONS),
+ __stringify(IPA_CLIENT_A2_EMBEDDED_CONS),
+ __stringify(IPA_CLIENT_A2_TETHERED_CONS),
+ __stringify(IPA_CLIENT_A5_LAN_WAN_CONS),
+ __stringify(IPA_CLIENT_APPS_LAN_CONS),
+ __stringify(IPA_CLIENT_APPS_WAN_CONS),
+ __stringify(IPA_CLIENT_ODU_EMB_CONS),
+ __stringify(IPA_CLIENT_ODU_TETH_CONS),
+ __stringify(IPA_CLIENT_MHI_CONS),
+ __stringify(IPA_CLIENT_Q6_LAN_CONS),
+ __stringify(IPA_CLIENT_Q6_WAN_CONS),
+ __stringify(IPA_CLIENT_Q6_DUN_CONS),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS),
+ __stringify(IPA_CLIENT_Q6_DECOMP_CONS),
+ __stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
+ __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
+	/* The CONS client types below are only for test purposes */
+ __stringify(IPA_CLIENT_TEST_CONS),
+ __stringify(IPA_CLIENT_TEST1_CONS),
+ __stringify(IPA_CLIENT_TEST2_CONS),
+ __stringify(IPA_CLIENT_TEST3_CONS),
+ __stringify(IPA_CLIENT_TEST4_CONS),
+};
+
+/**
+ * ipa_write_64() - convert 64 bit value to byte array
+ * @w: 64 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte following the last byte written
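+ *
+ * Bytes are stored least-significant byte first (little endian); e.g.
+ * (illustrative) w == 0x0102030405060708 yields dest[0] == 0x08 and
+ * dest[7] == 0x01.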
+ */
+u8 *ipa_write_64(u64 w, u8 *dest)
+{
+ if (unlikely(dest == NULL)) {
+ pr_err("ipa_write_64: NULL address!\n");
+ return dest;
+ }
+ *dest++ = (u8)((w) & 0xFF);
+ *dest++ = (u8)((w >> 8) & 0xFF);
+ *dest++ = (u8)((w >> 16) & 0xFF);
+ *dest++ = (u8)((w >> 24) & 0xFF);
+ *dest++ = (u8)((w >> 32) & 0xFF);
+ *dest++ = (u8)((w >> 40) & 0xFF);
+ *dest++ = (u8)((w >> 48) & 0xFF);
+ *dest++ = (u8)((w >> 56) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_32() - convert 32 bit value to byte array
+ * @w: 32 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte following the last byte written
+ */
+u8 *ipa_write_32(u32 w, u8 *dest)
+{
+ if (unlikely(dest == NULL)) {
+ pr_err("ipa_write_32: NULL address!\n");
+ return dest;
+ }
+ *dest++ = (u8)((w) & 0xFF);
+ *dest++ = (u8)((w >> 8) & 0xFF);
+ *dest++ = (u8)((w >> 16) & 0xFF);
+ *dest++ = (u8)((w >> 24) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_16() - convert 16 bit value to byte array
+ * @hw: 16 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte following the last byte written
+ */
+u8 *ipa_write_16(u16 hw, u8 *dest)
+{
+ if (unlikely(dest == NULL)) {
+ pr_err("ipa_write_16: NULL address!\n");
+ return dest;
+ }
+ *dest++ = (u8)((hw) & 0xFF);
+ *dest++ = (u8)((hw >> 8) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_8() - convert 8 bit value to byte array
+ * @b: 8 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte following the last byte written
+ */
+u8 *ipa_write_8(u8 b, u8 *dest)
+{
+ if (unlikely(dest == NULL)) {
+ pr_err("ipa_write_8: NULL address!\n");
+ return dest;
+ }
+ *dest++ = (b) & 0xFF;
+
+ return dest;
+}
+
+/**
+ * ipa_pad_to_64() - pad a byte array up to the next 64 bit boundary
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte following the padding
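+ *
+ * E.g. (illustrative) if dest's address ends in 0x5, three zero bytes are
+ * written so that the returned pointer is 8-byte aligned.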
+ */
+u8 *ipa_pad_to_64(u8 *dest)
+{
+ int i = (long)dest & 0x7;
+ int j;
+
+ if (i)
+ for (j = 0; j < (8 - i); j++)
+ *dest++ = 0;
+
+ return dest;
+}
+
+/**
+ * ipa_pad_to_32() - pad a byte array up to the next 32 bit boundary
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte following the padding
+ */
+u8 *ipa_pad_to_32(u8 *dest)
+{
+ int i = (long)dest & 0x3;
+ int j;
+
+ if (i)
+ for (j = 0; j < (4 - i); j++)
+ *dest++ = 0;
+
+ return dest;
+}
+
+/**
+ * ipa_connect() - low-level IPA client connect
+ * @in: [in] input parameters from client
+ * @sps: [out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl: [out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode. These peripherals are USB and HSIC. This API
+ * expects the caller to take responsibility for adding any needed headers,
+ * routing and filtering tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+ u32 *clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_connect, in, sps, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_connect);
+
+/**
+ * ipa_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take
+ * responsibility for freeing any needed headers, routing and filtering
+ * tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_disconnect(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disconnect, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_disconnect);
+
+/**
+* ipa_clear_endpoint_delay() - Clear ep_delay.
+* @clnt_hdl: [in] IPA client handle
+*
+* Returns: 0 on success, negative on failure
+*
+* Note: Should not be called from atomic context
+*/
+int ipa_clear_endpoint_delay(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_clear_endpoint_delay, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_clear_endpoint_delay);
+
+/**
+* ipa_reset_endpoint() - reset an endpoint from BAM perspective
+* @clnt_hdl: [in] IPA client handle
+*
+* Returns: 0 on success, negative on failure
+*
+* Note: Should not be called from atomic context
+*/
+int ipa_reset_endpoint(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_reset_endpoint, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_reset_endpoint);
+
+/**
+* ipa_disable_endpoint() - Disable an endpoint from IPA perspective
+* @clnt_hdl: [in] IPA client handle
+*
+* Returns: 0 on success, negative on failure
+*
+* Note: Should not be called from atomic context
+*/
+int ipa_disable_endpoint(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disable_endpoint, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_disable_endpoint);
+
+/**
+ * ipa_cfg_ep - IPA end-point configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a one
+ * shot API to configure the IPA end-point fully
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
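+ *
+ * Illustrative usage (hypothetical client code; the mode field shown
+ * mirrors struct ipa_ep_cfg_mode used by ipa_cfg_ep_mode() below):
+ *	struct ipa_ep_cfg cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.mode.mode = IPA_BASIC;
+ *	ret = ipa_cfg_ep(clnt_hdl, &cfg);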
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep, clnt_hdl, ipa_ep_cfg);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep);
+
+/**
+ * ipa_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_nat, clnt_hdl, ep_nat);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_nat);
+
+/**
+ * ipa_cfg_ep_hdr() - IPA end-point header configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr, clnt_hdl, ep_hdr);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr);
+
+/**
+ * ipa_cfg_ep_hdr_ext() - IPA end-point extended header configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_hdr_ext: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr_ext, clnt_hdl, ep_hdr_ext);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr_ext);
+
+/**
+ * ipa_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_mode, clnt_hdl, ep_mode);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_mode);
+
+/**
+ * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_aggr, clnt_hdl, ep_aggr);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_aggr);
+
+/**
+ * ipa_cfg_ep_deaggr() - IPA end-point deaggregation configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_deaggr: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_deaggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_deaggr, clnt_hdl, ep_deaggr);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_deaggr);
+
+/**
+ * ipa_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_route, clnt_hdl, ep_route);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_route);
+
+/**
+ * ipa_cfg_ep_holb() - IPA end-point holb configuration
+ *
+ * If an IPA producer pipe is full, IPA HW by default will block
+ * indefinitely till space opens up. During this time no packets
+ * including those from unrelated pipes will be processed. Enabling
+ * HOLB means IPA HW will be allowed to drop packets as/when needed
+ * and indefinite blocking is avoided.
+ *
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
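+ *
+ * Illustrative usage (hypothetical client code): enable HOLB drop on a
+ * consumer pipe with an immediate-drop timer value:
+ *	struct ipa_ep_cfg_holb holb = { .en = 1, .tmr_val = 0 };
+ *
+ *	ret = ipa_cfg_ep_holb(clnt_hdl, &holb);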
+ */
+int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb, clnt_hdl, ep_holb);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_holb);
+
+/**
+ * ipa_cfg_ep_cfg() - IPA end-point cfg configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_cfg, clnt_hdl, cfg);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_cfg);
+
+/**
+ * ipa_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask
+ *metadata_mask)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_metadata_mask, clnt_hdl,
+ metadata_mask);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_metadata_mask);
+
+/**
+ * ipa_cfg_ep_holb_by_client() - IPA end-point holb configuration
+ *
+ * Wrapper function for ipa_cfg_ep_holb() with client name instead of
+ * client handle. This function is used for clients that do not have a
+ * client handle.
+ *
+ * @client: [in] client name
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
+ const struct ipa_ep_cfg_holb *ep_holb)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb_by_client, client, ep_holb);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_holb_by_client);
+
+/**
+ * ipa_cfg_ep_ctrl() - IPA end-point Control configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_ctrl, clnt_hdl, ep_ctrl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_ctrl);
+
+/**
+ * ipa_add_hdr() - add the specified headers to SW and optionally commit them to
+ * IPA HW
+ * @hdrs: [inout] set of headers to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_add_hdr, hdrs);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_hdr);
+
+/**
+ * ipa_del_hdr() - Remove the specified headers from SW and optionally
+ * commit them to IPA HW
+ * @hdls: [inout] set of headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_del_hdr, hdls);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_del_hdr);
+
+/**
+ * ipa_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_hdr(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_commit_hdr);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_commit_hdr);
+
+/**
+ * ipa_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_hdr(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_reset_hdr);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_reset_hdr);
+
+/**
+ * ipa_get_hdr() - Lookup the specified header resource
+ * @lookup: [inout] header to lookup and its handle
+ *
+ * lookup the specified header resource and return handle if it exists
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_hdr, lookup);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_hdr);
+
+/**
+ * ipa_put_hdr() - Release the specified header handle
+ * @hdr_hdl: [in] the header handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_put_hdr(u32 hdr_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_put_hdr, hdr_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_put_hdr);
+
+/**
+ * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it
+ * @copy: [inout] header to lookup and its copy
+ *
+ * lookup the specified header resource and return a copy of it (along with its
+ * attributes) if it exists, this would be called for partial headers
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_copy_hdr, copy);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_copy_hdr);
+
+/**
+ * ipa_add_hdr_proc_ctx() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @proc_ctxs: [inout] set of processing context headers to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_hdr_proc_ctx);
+
+/**
+ * ipa_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls: [inout] set of processing context headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_del_hdr_proc_ctx, hdls);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_del_hdr_proc_ctx);
+
+/**
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_add_rt_rule, rules);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule);
+
+/**
+ * ipa_del_rt_rule() - Remove the specified routing rules from SW and optionally
+ * commit to IPA HW
+ * @hdls: [inout] set of routing rules to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_del_rt_rule, hdls);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_del_rt_rule);
+
+/**
+ * ipa_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_rt(enum ipa_ip_type ip)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_commit_rt, ip);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_commit_rt);
+
+/**
+ * ipa_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_rt(enum ipa_ip_type ip)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_reset_rt);
+
+/**
+ * ipa_get_rt_tbl() - lookup the specified routing table and return handle if it
+ * exists, if lookup succeeds the routing table ref cnt is increased
+ * @lookup: [inout] routing table to lookup and its handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_rt_tbl later if this function succeeds
+ */
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_rt_tbl, lookup);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_rt_tbl);
+
+/**
+ * ipa_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl: [in] the routing table handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_put_rt_tbl, rt_tbl_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_put_rt_tbl);
+
+/**
+ * ipa_query_rt_index() - find the index of the routing table
+ * whose name and IP type are given as parameters
+ * @in: [inout] the name and IP type to look up; on success the index of
+ * the wanted routing table is returned in this struct
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_query_rt_index, in);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_query_rt_index);
+
+/**
+ * ipa_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally
+ * commit to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mdfy_rt_rule, hdls);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_rt_rule);
+
+/**
+ * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_add_flt_rule, rules);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule);
+
+/**
+ * ipa_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_del_flt_rule, hdls);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_del_flt_rule);
+
+/**
+ * ipa_mdfy_flt_rule() - Modify the specified filtering rules in SW and
+ * optionally commit to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mdfy_flt_rule, hdls);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_flt_rule);
+
+/**
+ * ipa_commit_flt() - Commit the current SW filtering table of specified type to
+ * IPA HW
+ * @ip: [in] the family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_flt(enum ipa_ip_type ip)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_commit_flt, ip);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_commit_flt);
+
+/**
+ * ipa_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip: [in] the family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_flt(enum ipa_ip_type ip)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_reset_flt);
+
+/**
+ * allocate_nat_device() - Allocates memory for the NAT device
+ * @mem: [in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(allocate_nat_device, mem);
+
+ return ret;
+}
+EXPORT_SYMBOL(allocate_nat_device);
+
+/**
+ * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_nat_init_cmd, init);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_nat_init_cmd);
+
+/**
+ * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_nat_dma_cmd, dma);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_nat_dma_cmd);
+
+/**
+ * ipa_nat_del_cmd() - Delete a NAT table
+ * @del: [in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_nat_del_cmd, del);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_nat_del_cmd);
+
+/**
+ * ipa_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * Client supplies the message meta-data and payload which IPA driver buffers
+ * till read by user-space. After read from user space IPA driver invokes the
+ * callback supplied to free the message payload. Client must not touch/free
+ * the message payload after calling this API.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
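+ *
+ * Illustrative usage (hypothetical client code; field names follow
+ * struct ipa_msg_meta):
+ *	struct ipa_msg_meta meta;
+ *
+ *	memset(&meta, 0, sizeof(meta));
+ *	meta.msg_type = msg_type;
+ *	meta.msg_len = buff_len;
+ *	ret = ipa_send_msg(&meta, buff, my_free_cb);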
+ */
+int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_send_msg, meta, buff, callback);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_send_msg);
+
+/**
+ * ipa_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register message callback by kernel client with IPA driver for IPA driver to
+ * pull message on-demand.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_register_pull_msg, meta, callback);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_register_pull_msg);
+
+/**
+ * ipa_deregister_pull_msg() - De-register pull message type
+ * @meta: [in] message meta-data
+ *
+ * De-register "message" by kernel client from IPA driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_deregister_pull_msg, meta);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_deregister_pull_msg);
+
+/**
+ * ipa_register_intf() - register "logical" interface
+ * @name: [in] interface name
+ * @tx: [in] TX properties of the interface
+ * @rx: [in] RX properties of the interface
+ *
+ * Register an interface and its tx and rx properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_register_intf, name, tx, rx);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_register_intf);
+
+/**
+ * ipa_register_intf_ext() - register "logical" interface which has only
+ * extended properties
+ * @name: [in] interface name
+ * @tx: [in] TX properties of the interface
+ * @rx: [in] RX properties of the interface
+ * @ext: [in] EXT properties of the interface
+ *
+ * Register an interface and its tx, rx and ext properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_register_intf_ext, name, tx, rx, ext);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_register_intf_ext);
+
+/**
+ * ipa_deregister_intf() - de-register previously registered logical interface
+ * @name: [in] interface name
+ *
+ * De-register a previously registered interface
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_deregister_intf(const char *name)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_deregister_intf, name);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_deregister_intf);
+
+/**
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
+ * etc
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_set_aggr_mode, mode);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_set_aggr_mode);
+
+/**
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_set_qcncm_ndp_sig, sig);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable: [in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_set_single_ndp_per_mbim, enable);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst: [in] which IPA destination to route tx packets to
+ * @skb: [in] the packet to send
+ * @metadata: [in] TX packet meta-data
+ *
+ * Data-path tx handler; this is used for both the SW data-path which bypasses most
+ * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If
+ * dst is a "valid" CONS type, then SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. For errors, client needs to free the skb as needed. For success,
+ * IPA driver will later invoke client callback if one was supplied. That
+ * callback should free the skb. If no callback supplied, IPA driver will free
+ * the skb internally
+ *
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send was done from SPS point-of-view the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp will call to the user supplied
+ * callback (from ipa_connect)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *meta)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_tx_dp, dst, skb, meta);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_tx_dp);
+
+/**
+ * ipa_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @ipa_tx_data_desc: [in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received from the
+ * WLAN1_PROD pipe to the IPA HW.
+ *
+ * The function will send data descriptors from WLAN1_PROD (one
+ * at a time) using sps_transfer_one, setting the EOT flag on the
+ * last descriptor. Once the send is done from the SPS point of
+ * view, the IPA driver will get notified by the supplied
+ * callback - ipa_sps_irq_tx_no_aggr_notify()
+ *
+ * ipa_sps_irq_tx_no_aggr_notify will call to the user supplied
+ * callback (from ipa_connect)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_tx_dp_mul(enum ipa_client_type src,
+ struct ipa_tx_data_desc *data_desc)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_tx_dp_mul, src, data_desc);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_tx_dp_mul);
+
+void ipa_free_skb(struct ipa_rx_data *data)
+{
+ IPA_API_DISPATCH(ipa_free_skb, data);
+}
+EXPORT_SYMBOL(ipa_free_skb);
+
+/**
+ * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in: [in] input needed to setup BAM pipe and configure EP
+ * @clnt_hdl: [out] client handle
+ *
+ * - configure the end-point registers with the supplied
+ * parameters from the user.
+ * - call SPS APIs to create a system-to-bam connection with IPA.
+ * - allocate descriptor FIFO
+ * - register a callback function (ipa_sps_irq_rx_notify or
+ * ipa_sps_irq_tx_notify, depending on client type) in case the driver is
+ * not configured for polling mode
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_setup_sys_pipe, sys_in, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_setup_sys_pipe);
+
+/**
+ * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl: [in] the handle obtained from ipa_setup_sys_pipe
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_teardown_sys_pipe, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_teardown_sys_pipe);
+
+int ipa_sys_setup(struct ipa_sys_connect_params *sys_in,
+ unsigned long *ipa_bam_or_gsi_hdl,
+ u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_sys_setup, sys_in, ipa_bam_or_gsi_hdl,
+ ipa_pipe_num, clnt_hdl, en_status);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_sys_setup);
+
+int ipa_sys_teardown(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_sys_teardown, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_sys_teardown);
+
+int ipa_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+ unsigned long gsi_ev_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_sys_update_gsi_hdls, clnt_hdl,
+ gsi_ch_hdl, gsi_ev_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_sys_update_gsi_hdls);
+
+/**
+ * ipa_connect_wdi_pipe() - WDI client connect
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_connect_wdi_pipe, in, out);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_connect_wdi_pipe);
+
+/**
+ * ipa_disconnect_wdi_pipe() - WDI client disconnect
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disconnect_wdi_pipe, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_disconnect_wdi_pipe);
+
+/**
+ * ipa_enable_wdi_pipe() - WDI client enable
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_enable_wdi_pipe(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_enable_wdi_pipe, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_enable_wdi_pipe);
+
+/**
+ * ipa_disable_wdi_pipe() - WDI client disable
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_disable_wdi_pipe(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disable_wdi_pipe, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_disable_wdi_pipe);
+
+/**
+ * ipa_resume_wdi_pipe() - WDI client resume
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_resume_wdi_pipe(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_resume_wdi_pipe, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_resume_wdi_pipe);
+
+/**
+ * ipa_suspend_wdi_pipe() - WDI client suspend
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_suspend_wdi_pipe(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_suspend_wdi_pipe, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_wdi_pipe);
+
+/**
+ * ipa_get_wdi_stats() - Query WDI statistics from uc
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_wdi_stats, stats);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_wdi_stats);
+
+/**
+ * ipa_get_smem_restr_bytes() - Return IPA smem restricted bytes
+ *
+ * Return value: u16 - number of IPA smem restricted bytes
+ */
+u16 ipa_get_smem_restr_bytes(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_smem_restr_bytes);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_smem_restr_bytes);
+
+/**
+ * ipa_uc_wdi_get_dbpa() - retrieve the
+ * doorbell physical address of the WLAN pipes
+ * @param: [in/out] input/output parameters
+ * from/to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_uc_wdi_get_dbpa(
+ struct ipa_wdi_db_params *param)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_wdi_get_dbpa, param);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_wdi_get_dbpa);
+
+/**
+ * ipa_uc_reg_rdyCB() - To register uC
+ * ready CB if uC not ready
+ * @inout: [in/out] input/output parameters
+ * from/to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_uc_reg_rdyCB(
+ struct ipa_wdi_uc_ready_params *inout)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_reg_rdyCB, inout);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_reg_rdyCB);
+
+/**
+ * ipa_uc_dereg_rdyCB() - To de-register uC ready CB
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_uc_dereg_rdyCB(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_dereg_rdyCB);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_dereg_rdyCB);
+
+/**
+* teth_bridge_init() - Initialize the Tethering bridge driver
+* @params - in/out params for the USB initialization API (see the struct
+* definition for more info)
+*
+* The USB driver gets a pointer to a callback function (usb_notify_cb) and
+* associated data, and installs this callback function in its call to
+* ipa_connect().
+*
+* Builds IPA resource manager dependency graph.
+*
+* Return codes: 0: success,
+* -EINVAL - Bad parameter
+* Other negative value - Failure
+*/
+int teth_bridge_init(struct teth_bridge_init_params *params)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(teth_bridge_init, params);
+
+ return ret;
+}
+EXPORT_SYMBOL(teth_bridge_init);
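+
+/*
+ * Illustrative usage (a minimal sketch, assuming the init params struct
+ * carries the usb_notify_cb and private data described above):
+ *
+ *     struct teth_bridge_init_params init_params;
+ *
+ *     memset(&init_params, 0, sizeof(init_params));
+ *     if (teth_bridge_init(&init_params))
+ *             return -EFAULT;
+ *     (the USB driver then passes init_params.usb_notify_cb and
+ *      init_params.private_data to ipa_connect())
+ */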
+
+/**
+* teth_bridge_disconnect() - Disconnect the tethering bridge module
+* @client: client type
+*
+* Return codes: 0 on success, negative on failure
+*/
+int teth_bridge_disconnect(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(teth_bridge_disconnect, client);
+
+ return ret;
+}
+EXPORT_SYMBOL(teth_bridge_disconnect);
+
+/**
+* teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+* @connect_params: Connection info
+*
+* Return codes: 0: success
+* -EINVAL: invalid parameters
+* -EPERM: Operation not permitted as the bridge is already
+* connected
+*/
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(teth_bridge_connect, connect_params);
+
+ return ret;
+}
+EXPORT_SYMBOL(teth_bridge_connect);
+
+/**
+ * ipa_set_client() - set client mapping for a pipe
+ * @index: pipe index
+ * @client: client type
+ * @uplink: whether the pipe carries uplink traffic
+ *
+ * Return value: none
+ */
+void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink)
+{
+ IPA_API_DISPATCH(ipa_set_client, index, client, uplink);
+}
+EXPORT_SYMBOL(ipa_set_client);
+
+/**
+ * ipa_get_client() - provide client mapping for a pipe
+ * @pipe_idx: pipe index
+ *
+ * Return value: client type mapped to the pipe
+ */
+enum ipacm_client_enum ipa_get_client(int pipe_idx)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_client, pipe_idx);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_client);
+
+/**
+ * ipa_get_client_uplink() - report whether a pipe is mapped as uplink
+ * @pipe_idx: pipe index
+ *
+ * Return value: true if the pipe carries uplink traffic, false otherwise
+ */
+bool ipa_get_client_uplink(int pipe_idx)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_client_uplink, pipe_idx);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_client_uplink);
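+
+/*
+ * Illustrative use of the client mapping API (a sketch, assuming an
+ * IPACM_CLIENT_WLAN entry in enum ipacm_client_enum): record that pipe 5
+ * carries WLAN uplink traffic and later query it back.
+ *
+ *     ipa_set_client(5, IPACM_CLIENT_WLAN, true);
+ *     ...
+ *     if (ipa_get_client(5) == IPACM_CLIENT_WLAN &&
+ *         ipa_get_client_uplink(5))
+ *             (treat pipe 5 as WLAN uplink)
+ */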
+
+/**
+ * ipa_dma_init() - Initialize IPADMA.
+ *
+ * This function initializes all IPADMA internal data and connects the
+ * IPADMA pipes:
+ * MEMCPY_DMA_SYNC_PROD -> MEMCPY_DMA_SYNC_CONS
+ * MEMCPY_DMA_ASYNC_PROD -> MEMCPY_DMA_ASYNC_CONS
+ *
+ * Return codes: 0: success
+ * -EFAULT: IPADMA is already initialized
+ * -ENOMEM: memory allocation error
+ * -EPERM: pipe connection failed
+ */
+int ipa_dma_init(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_dma_init);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_dma_init);
+
+/**
+ * ipa_dma_enable() - Vote for IPA clocks.
+ *
+ * Return codes: 0: success
+ * -EINVAL: IPADMA is not initialized
+ * -EPERM: Operation not permitted as ipa_dma is already
+ * enabled
+ */
+int ipa_dma_enable(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_dma_enable);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_dma_enable);
+
+/**
+ * ipa_dma_disable() - Unvote for IPA clocks.
+ *
+ * Enter power save mode.
+ *
+ * Return codes: 0: success
+ * -EINVAL: IPADMA is not initialized
+ * -EPERM: Operation not permitted as ipa_dma is already
+ * disabled
+ * -EFAULT: cannot disable ipa_dma as there are pending
+ * memcpy operations
+ */
+int ipa_dma_disable(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_dma_disable);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_dma_disable);
+
+/**
+ * ipa_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid params
+ * -EPERM: operation not permitted as ipa_dma isn't enabled or
+ * initialized
+ * -SPS_ERROR: on sps failures
+ * -EFAULT: other
+ */
+int ipa_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_dma_sync_memcpy, dest, src, len);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_dma_sync_memcpy);
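+
+/*
+ * Illustrative IPADMA synchronous copy sequence (a sketch; src_pa and
+ * dst_pa are assumed to be valid physical addresses):
+ *
+ *     if (ipa_dma_init())
+ *             return;
+ *     if (ipa_dma_enable())
+ *             goto destroy;
+ *     ipa_dma_sync_memcpy(dst_pa, src_pa, len);
+ *     ipa_dma_disable();
+ * destroy:
+ *     ipa_dma_destroy();
+ */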
+
+/**
+ * ipa_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid params
+ * -EPERM: operation not permitted as ipa_dma isn't enabled or
+ * initialized
+ * -SPS_ERROR: on sps failures
+ * -EFAULT: descriptor FIFO is full
+ */
+int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+ void (*user_cb)(void *user1), void *user_param)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_dma_async_memcpy, dest, src, len, user_cb,
+ user_param);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_dma_async_memcpy);
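+
+/*
+ * Illustrative asynchronous copy (a sketch; copy_done() is a hypothetical
+ * client callback matching the user_cb signature above):
+ *
+ *     static void copy_done(void *user)
+ *     {
+ *             complete((struct completion *)user);
+ *     }
+ *     ...
+ *     DECLARE_COMPLETION_ONSTACK(comp);
+ *
+ *     ipa_dma_async_memcpy(dst_pa, src_pa, len, copy_done, &comp);
+ *     wait_for_completion(&comp);
+ */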
+
+/**
+ * ipa_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid params
+ * -EPERM: operation not permitted as ipa_dma isn't enabled or
+ * initialized
+ * -EBADF: IPA uC is not loaded
+ */
+int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_dma_uc_memcpy, dest, src, len);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_dma_uc_memcpy);
+
+/**
+ * ipa_dma_destroy() - Tear down IPADMA pipes and release IPADMA.
+ *
+ * This is a blocking function; it returns only after IPADMA is destroyed.
+ */
+void ipa_dma_destroy(void)
+{
+ IPA_API_DISPATCH(ipa_dma_destroy);
+}
+EXPORT_SYMBOL(ipa_dma_destroy);
+
+int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_init_engine, params);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_mhi_init_engine);
+
+/**
+ * ipa_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by the IPA MHI client driver on MHI channel start,
+ * after the MHI engine was started.
+ * It does the following:
+ * - Send command to uC to start corresponding MHI channel
+ * - Configure IPA EP control
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_connect_mhi_pipe, in, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_connect_mhi_pipe);
+
+/**
+ * ipa_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: [in] client handle for this pipe
+ *
+ * This function is called by the IPA MHI client driver on MHI channel reset,
+ * after the MHI channel was started.
+ * It does the following:
+ * - Send command to uC to reset corresponding MHI channel
+ * - Configure IPA EP control
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_disconnect_mhi_pipe(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disconnect_mhi_pipe, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_disconnect_mhi_pipe);
+
+bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client)
+{
+ bool ret;
+
+ IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_stop_gsi_channel, client);
+
+ return ret;
+}
+
+int ipa_uc_mhi_reset_channel(int channelHandle)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_reset_channel, channelHandle);
+
+ return ret;
+}
+
+bool ipa_mhi_sps_channel_empty(enum ipa_client_type client)
+{
+ bool ret;
+
+ IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_sps_channel_empty, client);
+
+ return ret;
+}
+
+int ipa_qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_qmi_enable_force_clear_datapath_send, req);
+
+ return ret;
+}
+
+int ipa_qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_qmi_disable_force_clear_datapath_send, req);
+
+ return ret;
+}
+
+int ipa_generate_tag_process(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_generate_tag_process);
+
+ return ret;
+}
+
+int ipa_disable_sps_pipe(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disable_sps_pipe, client);
+
+ return ret;
+}
+
+int ipa_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_reset_channel_internal, client);
+
+ return ret;
+}
+
+int ipa_mhi_start_channel_internal(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_start_channel_internal, client);
+
+ return ret;
+}
+
+void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+ IPA_API_DISPATCH(ipa_get_holb, ep_idx, holb);
+}
+
+void ipa_set_tag_process_before_gating(bool val)
+{
+ IPA_API_DISPATCH(ipa_set_tag_process_before_gating, val);
+}
+
+int ipa_mhi_query_ch_info(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_query_ch_info, client, ch_info);
+
+ return ret;
+}
+
+int ipa_uc_mhi_suspend_channel(int channelHandle)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_suspend_channel, channelHandle);
+
+ return ret;
+}
+
+int ipa_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_stop_event_update_channel,
+ channelHandle);
+
+ return ret;
+}
+
+bool ipa_has_open_aggr_frame(enum ipa_client_type client)
+{
+ bool ret;
+
+ IPA_API_DISPATCH_RETURN_BOOL(ipa_has_open_aggr_frame, client);
+
+ return ret;
+}
+
+int ipa_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_resume_channels_internal, client,
+ LPTransitionRejected, brstmode_enabled, ch_scratch,
+ index);
+
+ return ret;
+}
+
+int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_send_dl_ul_sync_info,
+ cmd);
+
+ return ret;
+}
+
+int ipa_mhi_destroy_channel(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_mhi_destroy_channel, client);
+
+ return ret;
+}
+
+int ipa_uc_mhi_init(void (*ready_cb)(void),
+ void (*wakeup_request_cb)(void))
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_init, ready_cb, wakeup_request_cb);
+
+ return ret;
+}
+
+void ipa_uc_mhi_cleanup(void)
+{
+ IPA_API_DISPATCH(ipa_uc_mhi_cleanup);
+}
+
+int ipa_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_mhi_print_stats, dbg_buff, size);
+
+ return ret;
+}
+
+/**
+ * ipa_uc_state_check() - Check the status of the uC interface
+ *
+ * Return value: 0 if the uC is loaded, interface is initialized
+ * and there was no recent failure in one of the commands.
+ * A negative value is returned otherwise.
+ */
+int ipa_uc_state_check(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_state_check);
+
+ return ret;
+}
+
+int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_write_qmap_id, param_in);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_write_qmap_id);
+
+/**
+* ipa_add_interrupt_handler() - Adds handler to an interrupt type
+* @interrupt: Interrupt type
+* @handler: The handler to be added
+* @deferred_flag: whether the handler processing should be deferred in
+* a workqueue
+* @private_data: the client's private data
+*
+* Adds a handler for an interrupt type and enables the specific bit
+* in the IRQ_EN register; the associated interrupt in the IRQ_STTS register
+* will be enabled
+*/
+int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_add_interrupt_handler, interrupt, handler,
+ deferred_flag, private_data);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_interrupt_handler);
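+
+/*
+ * Illustrative registration (a sketch; my_suspend_handler() is a
+ * hypothetical client function matching ipa_irq_handler_t, and
+ * IPA_TX_SUSPEND_IRQ is assumed to be an enum ipa_irq_type entry):
+ *
+ *     ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, my_suspend_handler,
+ *             true, NULL);
+ *     ...
+ *     ipa_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+ */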
+
+/**
+* ipa_remove_interrupt_handler() - Removes the handler for an interrupt type
+* @interrupt: Interrupt type
+*
+* Removes the handler and disables the specific bit in the IRQ_EN register
+*/
+int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_remove_interrupt_handler, interrupt);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_remove_interrupt_handler);
+
+/**
+* ipa_restore_suspend_handler() - restores the original suspend IRQ handler
+* as it was registered in the IPA init sequence.
+* Return codes:
+* 0: success
+* -EPERM: failed to remove current handler or failed to add original handler
+*/
+int ipa_restore_suspend_handler(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_restore_suspend_handler);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_restore_suspend_handler);
+
+/**
+ * ipa_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
+ *
+ * Function is rate limited to avoid flooding kernel log buffer
+ */
+void ipa_bam_reg_dump(void)
+{
+ IPA_API_DISPATCH(ipa_bam_reg_dump);
+}
+EXPORT_SYMBOL(ipa_bam_reg_dump);
+
+/**
+ * ipa_get_ep_mapping() - provide endpoint mapping
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa_get_ep_mapping(enum ipa_client_type client)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_ep_mapping, client);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_ep_mapping);
+
+/**
+ * ipa_is_ready() - check if IPA module was initialized
+ * successfully
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa_is_ready(void)
+{
+ if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_ready)
+ return false;
+ return ipa_api_ctrl->ipa_is_ready();
+}
+EXPORT_SYMBOL(ipa_is_ready);
+
+/**
+ * ipa_proxy_clk_vote() - called to add IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa_proxy_clk_vote(void)
+{
+ IPA_API_DISPATCH(ipa_proxy_clk_vote);
+}
+EXPORT_SYMBOL(ipa_proxy_clk_vote);
+
+/**
+ * ipa_proxy_clk_unvote() - called to remove IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa_proxy_clk_unvote(void)
+{
+ IPA_API_DISPATCH(ipa_proxy_clk_unvote);
+}
+EXPORT_SYMBOL(ipa_proxy_clk_unvote);
+
+/**
+ * ipa_get_hw_type() - Return IPA HW version
+ *
+ * Return value: enum ipa_hw_type
+ */
+enum ipa_hw_type ipa_get_hw_type(void)
+{
+ return ipa_api_hw_type;
+}
+EXPORT_SYMBOL(ipa_get_hw_type);
+
+/**
+ * ipa_is_client_handle_valid() - check if an IPA client handle is valid
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa_is_client_handle_valid(u32 clnt_hdl)
+{
+ if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_client_handle_valid)
+ return false;
+ return ipa_api_ctrl->ipa_is_client_handle_valid(clnt_hdl);
+}
+EXPORT_SYMBOL(ipa_is_client_handle_valid);
+
+/**
+ * ipa_get_client_mapping() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client mapping
+ */
+enum ipa_client_type ipa_get_client_mapping(int pipe_idx)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_client_mapping, pipe_idx);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_client_mapping);
+
+/**
+ * ipa_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
+ * the supplied pipe index.
+ *
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
+ * found.
+ */
+enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_rm_resource_from_ep, pipe_idx);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_rm_resource_from_ep);
+
+/**
+ * ipa_get_modem_cfg_emb_pipe_flt() - Return ipa_ctx->modem_cfg_emb_pipe_flt
+ *
+ * Return value: true if modem configures embedded pipe flt, false otherwise
+ */
+bool ipa_get_modem_cfg_emb_pipe_flt(void)
+{
+ if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt)
+ return false;
+ return ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt();
+}
+EXPORT_SYMBOL(ipa_get_modem_cfg_emb_pipe_flt);
+
+/**
+ * ipa_get_transport_type() - Return ipa_ctx->transport_prototype
+ *
+ * Return value: enum ipa_transport_type
+ */
+enum ipa_transport_type ipa_get_transport_type(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_transport_type);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_transport_type);
+
+/**
+ * ipa_get_smmu_domain() - Return the SMMU domain
+ *
+ * Return value: pointer to iommu domain if smmu_cb valid, NULL otherwise
+ */
+struct iommu_domain *ipa_get_smmu_domain(void)
+{
+ struct iommu_domain *ret;
+
+ IPA_API_DISPATCH_RETURN_PTR(ipa_get_smmu_domain);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_smmu_domain);
+
+/**
+ * ipa_disable_apps_wan_cons_deaggr() - set
+ * ipa_ctx->ipa_client_apps_wan_cons_agg_gro
+ *
+ * Return value: 0 or negative in case of failure
+ */
+int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disable_apps_wan_cons_deaggr, agg_size,
+ agg_count);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_disable_apps_wan_cons_deaggr);
+
+/**
+ * ipa_get_dma_dev() - Return the ipa_ctx DMA device pointer
+ *
+ * Return value: the ipa_ctx DMA device pointer
+ */
+struct device *ipa_get_dma_dev(void)
+{
+ struct device *ret;
+
+ IPA_API_DISPATCH_RETURN_PTR(ipa_get_dma_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_dma_dev);
+
+/**
+ * ipa_release_wdi_mapping() - release iommu mapping
+ * @num_buffers: number of buffers to be released
+ * @info: pointer to wdi buffers info array
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_release_wdi_mapping, num_buffers, info);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_release_wdi_mapping);
+
+/**
+ * ipa_create_wdi_mapping() - Perform iommu mapping
+ * @num_buffers: number of buffers to be mapped
+ * @info: pointer to wdi buffers info array
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_create_wdi_mapping, num_buffers, info);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_create_wdi_mapping);
+
+/**
+ * ipa_get_gsi_ep_info() - provide gsi ep information
+ * @ipa_ep_idx: IPA endpoint index
+ *
+ * Return value: pointer to ipa_gsi_ep_info
+ */
+struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx)
+{
+ if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_gsi_ep_info)
+ return NULL;
+ return ipa_api_ctrl->ipa_get_gsi_ep_info(ipa_ep_idx);
+}
+EXPORT_SYMBOL(ipa_get_gsi_ep_info);
+
+/**
+ * ipa_stop_gsi_channel() - Stops a GSI channel in IPA
+ * @clnt_hdl: client handle
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa_stop_gsi_channel(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_stop_gsi_channel, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_stop_gsi_channel);
+
+/**
+ * ipa_get_version_string() - Get string representation of IPA version
+ * @ver: IPA version
+ *
+ * Return: Constant string representation
+ */
+const char *ipa_get_version_string(enum ipa_hw_type ver)
+{
+ const char *str;
+
+ switch (ver) {
+ case IPA_HW_v1_0:
+ str = "1.0";
+ break;
+ case IPA_HW_v1_1:
+ str = "1.1";
+ break;
+ case IPA_HW_v2_0:
+ str = "2.0";
+ break;
+ case IPA_HW_v2_1:
+ str = "2.1";
+ break;
+ case IPA_HW_v2_5:
+ str = "2.5/2.6";
+ break;
+ case IPA_HW_v2_6L:
+ str = "2.6L";
+ break;
+ case IPA_HW_v3_0:
+ str = "3.0";
+ break;
+ case IPA_HW_v3_1:
+ str = "3.1";
+ break;
+ case IPA_HW_v3_5:
+ str = "3.5";
+ break;
+ case IPA_HW_v3_5_1:
+ str = "3.5.1";
+ break;
+ default:
+ str = "Invalid version";
+ break;
+ }
+
+ return str;
+}
+EXPORT_SYMBOL(ipa_get_version_string);
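+
+/*
+ * Illustrative use (a sketch): the two accessors above can be combined
+ * to log the detected hardware.
+ *
+ *     pr_info("ipa: HW version %s\n",
+ *             ipa_get_version_string(ipa_get_hw_type()));
+ */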
+
+static const struct of_device_id ipa_plat_drv_match[] = {
+ { .compatible = "qcom,ipa", },
+ { .compatible = "qcom,ipa-smmu-ap-cb", },
+ { .compatible = "qcom,ipa-smmu-wlan-cb", },
+ { .compatible = "qcom,ipa-smmu-uc-cb", },
+ { .compatible = "qcom,smp2pgpio-map-ipa-1-in", },
+ { .compatible = "qcom,smp2pgpio-map-ipa-1-out", },
+ {}
+};
+
+static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
+{
+ int result;
+
+ /*
+ * The IPA probe function can be called multiple times, as the same probe
+ * function handles multiple compatible strings
+ */
+ pr_debug("ipa: IPA driver probing started for %s\n",
+ pdev_p->dev.of_node->name);
+
+ if (!ipa_api_ctrl) {
+ ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
+ if (!ipa_api_ctrl)
+ return -ENOMEM;
+
+ /* Get IPA HW Version */
+ result = of_property_read_u32(pdev_p->dev.of_node,
+ "qcom,ipa-hw-ver", &ipa_api_hw_type);
+ if (result || ipa_api_hw_type == 0) {
+ pr_err("ipa: get resource failed for ipa-hw-ver!\n");
+ kfree(ipa_api_ctrl);
+ ipa_api_ctrl = NULL;
+ return -ENODEV;
+ }
+ pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type);
+ }
+
+ /* call probe based on IPA HW version */
+ switch (ipa_api_hw_type) {
+ case IPA_HW_v2_0:
+ case IPA_HW_v2_1:
+ case IPA_HW_v2_5:
+ case IPA_HW_v2_6L:
+ result = ipa_plat_drv_probe(pdev_p, ipa_api_ctrl,
+ ipa_plat_drv_match);
+ break;
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_1:
+ case IPA_HW_v3_5:
+ case IPA_HW_v3_5_1:
+ result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl,
+ ipa_plat_drv_match);
+ break;
+ default:
+ pr_err("ipa: unsupported version %d\n", ipa_api_hw_type);
+ return -EPERM;
+ }
+
+ if (result && result != -EPROBE_DEFER)
+ pr_err("ipa: ipa_plat_drv_probe failed\n");
+
+ return result;
+}
+
+static int ipa_ap_suspend(struct device *dev)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_ap_suspend, dev);
+
+ return ret;
+}
+
+static int ipa_ap_resume(struct device *dev)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_ap_resume, dev);
+
+ return ret;
+}
+
+int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
+ void *user_data)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_register_ipa_ready_cb,
+ ipa_ready_cb, user_data);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_register_ipa_ready_cb);
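+
+/*
+ * Illustrative readiness handling (a sketch; my_ready_cb() and my_ctx are
+ * hypothetical): a client might poll ipa_is_ready() first and register the
+ * callback only if initialization has not finished yet.
+ *
+ *     if (ipa_is_ready())
+ *             my_ready_cb(my_ctx);
+ *     else
+ *             ipa_register_ipa_ready_cb(my_ready_cb, my_ctx);
+ */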
+
+/**
+ * ipa_inc_client_enable_clks() - Increase active clients counter, and
+ * enable ipa clocks if necessary
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_INC_XXX();
+ *
+ * Return codes:
+ * None
+*/
+void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+ IPA_API_DISPATCH(ipa_inc_client_enable_clks, id);
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks);
+
+/**
+ * ipa_dec_client_disable_clks() - Decrease active clients counter, and
+ * disable ipa clocks if necessary
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_DEC_XXX();
+ *
+ * Return codes:
+ * None
+*/
+void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+ IPA_API_DISPATCH(ipa_dec_client_disable_clks, id);
+}
+EXPORT_SYMBOL(ipa_dec_client_disable_clks);
+
+/**
+ * ipa_inc_client_enable_clks_no_block() - Only increment the number of active
+ * clients if no asynchronous actions should be done. Asynchronous actions are
+ * locking a mutex and waking up IPA HW.
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ *
+ * Return codes: 0 for success
+ * -EPERM if an asynchronous action should have been done
+ */
+int ipa_inc_client_enable_clks_no_block(
+ struct ipa_active_client_logging_info *id)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_inc_client_enable_clks_no_block, id);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks_no_block);
+
+/**
+* ipa_suspend_resource_no_block() - suspend client endpoints related to the
+* IPA_RM resource and decrement active clients counter. This function is
+* guaranteed to avoid sleeping.
+*
+* @resource: [IN] IPA Resource Manager resource
+*
+* Return codes: 0 on success, negative on failure.
+*/
+int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_suspend_resource_no_block, resource);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_no_block);
+
+/**
+ * ipa_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_resume_resource(enum ipa_rm_resource_name resource)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_resume_resource, resource);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_resume_resource);
+
+/**
+ * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_suspend_resource_sync, resource);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_sync);
+
+/**
+ * ipa_set_required_perf_profile() - set IPA to the specified performance
+ * profile based on the bandwidth, unless minimum voltage required is
+ * higher. In this case the floor_voltage specified will be used.
+ * @floor_voltage: minimum voltage to operate
+ * @bandwidth_mbps: needed bandwidth from IPA
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+ u32 bandwidth_mbps)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_set_required_perf_profile, floor_voltage,
+ bandwidth_mbps);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_set_required_perf_profile);
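+
+/*
+ * Illustrative vote (a sketch, assuming an IPA_VOLTAGE_UNSPECIFIED entry
+ * in enum ipa_voltage_level): request enough performance for roughly
+ * 800 Mbps of throughput with no minimum voltage constraint.
+ *
+ *     ipa_set_required_perf_profile(IPA_VOLTAGE_UNSPECIFIED, 800);
+ */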
+
+/**
+ * ipa_get_ipc_logbuf() - return a pointer to IPA driver IPC log
+ */
+void *ipa_get_ipc_logbuf(void)
+{
+ void *ret;
+
+ IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_ipc_logbuf);
+
+/**
+ * ipa_get_ipc_logbuf_low() - return a pointer to IPA driver IPC low prio log
+ */
+void *ipa_get_ipc_logbuf_low(void)
+{
+ void *ret;
+
+ IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf_low);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_ipc_logbuf_low);
+
+/**
+ * ipa_assert() - general function for assertion
+ */
+void ipa_assert(void)
+{
+ pr_err("IPA: unrecoverable error has occurred, asserting\n");
+ BUG();
+}
+
+/**
+ * ipa_rx_poll() - Poll the rx packets from IPA HW in the
+ * softirq context
+ *
+ * @clnt_hdl: client handle
+ * @budget: number of packets to be polled in a single iteration
+ *
+ * Return codes: >= 0 : Actual number of packets polled
+ *
+ */
+int ipa_rx_poll(u32 clnt_hdl, int budget)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_rx_poll, clnt_hdl, budget);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_rx_poll);
+
+/**
+ * ipa_recycle_wan_skb() - Recycle the Wan skb
+ *
+ * @skb: skb to be recycled
+ *
+ */
+void ipa_recycle_wan_skb(struct sk_buff *skb)
+{
+ IPA_API_DISPATCH(ipa_recycle_wan_skb, skb);
+}
+EXPORT_SYMBOL(ipa_recycle_wan_skb);
+
+/**
+ * ipa_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_setup_uc_ntn_pipes, inp,
+ notify, priv, hdr_len, outp);
+
+ return ret;
+}
+
+/**
+ * ipa_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_tear_down_uc_offload_pipes, ipa_ep_idx_ul,
+ ipa_ep_idx_dl);
+
+ return ret;
+}
+
+static const struct dev_pm_ops ipa_pm_ops = {
+ .suspend_noirq = ipa_ap_suspend,
+ .resume_noirq = ipa_ap_resume,
+};
+
+static struct platform_driver ipa_plat_drv = {
+ .probe = ipa_generic_plat_drv_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &ipa_pm_ops,
+ .of_match_table = ipa_plat_drv_match,
+ },
+};
+
+static int __init ipa_module_init(void)
+{
+ pr_debug("IPA module init\n");
+
+ /* Register as a platform device driver */
+ return platform_driver_register(&ipa_plat_drv);
+}
+subsys_initcall(ipa_module_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
new file mode 100644
index 0000000..f662661
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -0,0 +1,400 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_uc_offload.h>
+#include "ipa_common_i.h"
+
+#ifndef _IPA_API_H_
+#define _IPA_API_H_
+
+struct ipa_api_controller {
+ int (*ipa_connect)(const struct ipa_connect_params *in,
+ struct ipa_sps_params *sps, u32 *clnt_hdl);
+
+ int (*ipa_disconnect)(u32 clnt_hdl);
+
+ int (*ipa_reset_endpoint)(u32 clnt_hdl);
+
+ int (*ipa_clear_endpoint_delay)(u32 clnt_hdl);
+
+ int (*ipa_disable_endpoint)(u32 clnt_hdl);
+
+ int (*ipa_cfg_ep)(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_nat)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_hdr)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_hdr_ext)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_mode)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_aggr)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_deaggr)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_route)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_holb)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_cfg)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_holb_by_client)(enum ipa_client_type client,
+ const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+ int (*ipa_cfg_ep_ctrl)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+ int (*ipa_add_hdr)(struct ipa_ioc_add_hdr *hdrs);
+
+ int (*ipa_del_hdr)(struct ipa_ioc_del_hdr *hdls);
+
+ int (*ipa_commit_hdr)(void);
+
+ int (*ipa_reset_hdr)(void);
+
+ int (*ipa_get_hdr)(struct ipa_ioc_get_hdr *lookup);
+
+ int (*ipa_put_hdr)(u32 hdr_hdl);
+
+ int (*ipa_copy_hdr)(struct ipa_ioc_copy_hdr *copy);
+
+ int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+ int (*ipa_del_hdr_proc_ctx)(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+ int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules);
+
+ int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls);
+
+ int (*ipa_commit_rt)(enum ipa_ip_type ip);
+
+ int (*ipa_reset_rt)(enum ipa_ip_type ip);
+
+ int (*ipa_get_rt_tbl)(struct ipa_ioc_get_rt_tbl *lookup);
+
+ int (*ipa_put_rt_tbl)(u32 rt_tbl_hdl);
+
+ int (*ipa_query_rt_index)(struct ipa_ioc_get_rt_tbl_indx *in);
+
+ int (*ipa_mdfy_rt_rule)(struct ipa_ioc_mdfy_rt_rule *rules);
+
+ int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules);
+
+ int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls);
+
+ int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules);
+
+ int (*ipa_commit_flt)(enum ipa_ip_type ip);
+
+ int (*ipa_reset_flt)(enum ipa_ip_type ip);
+
+ int (*allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem);
+
+ int (*ipa_nat_init_cmd)(struct ipa_ioc_v4_nat_init *init);
+
+ int (*ipa_nat_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma);
+
+ int (*ipa_nat_del_cmd)(struct ipa_ioc_v4_nat_del *del);
+
+ int (*ipa_send_msg)(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback);
+
+ int (*ipa_register_pull_msg)(struct ipa_msg_meta *meta,
+ ipa_msg_pull_fn callback);
+
+ int (*ipa_deregister_pull_msg)(struct ipa_msg_meta *meta);
+
+ int (*ipa_register_intf)(const char *name,
+ const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx);
+
+ int (*ipa_register_intf_ext)(const char *name,
+ const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext);
+
+ int (*ipa_deregister_intf)(const char *name);
+
+ int (*ipa_set_aggr_mode)(enum ipa_aggr_mode mode);
+
+ int (*ipa_set_qcncm_ndp_sig)(char sig[3]);
+
+ int (*ipa_set_single_ndp_per_mbim)(bool enable);
+
+ int (*ipa_tx_dp)(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata);
+
+ int (*ipa_tx_dp_mul)(enum ipa_client_type dst,
+ struct ipa_tx_data_desc *data_desc);
+
+ void (*ipa_free_skb)(struct ipa_rx_data *);
+
+ int (*ipa_setup_sys_pipe)(struct ipa_sys_connect_params *sys_in,
+ u32 *clnt_hdl);
+
+ int (*ipa_teardown_sys_pipe)(u32 clnt_hdl);
+
+ int (*ipa_sys_setup)(struct ipa_sys_connect_params *sys_in,
+ unsigned long *ipa_bam_hdl,
+ u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
+
+ int (*ipa_sys_teardown)(u32 clnt_hdl);
+
+ int (*ipa_sys_update_gsi_hdls)(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+ unsigned long gsi_ev_hdl);
+
+ int (*ipa_connect_wdi_pipe)(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out);
+
+ int (*ipa_disconnect_wdi_pipe)(u32 clnt_hdl);
+
+ int (*ipa_enable_wdi_pipe)(u32 clnt_hdl);
+
+ int (*ipa_disable_wdi_pipe)(u32 clnt_hdl);
+
+ int (*ipa_resume_wdi_pipe)(u32 clnt_hdl);
+
+ int (*ipa_suspend_wdi_pipe)(u32 clnt_hdl);
+
+ int (*ipa_get_wdi_stats)(struct IpaHwStatsWDIInfoData_t *stats);
+
+ u16 (*ipa_get_smem_restr_bytes)(void);
+
+ int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out);
+
+ int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
+
+ int (*ipa_uc_dereg_rdyCB)(void);
+
+ int (*teth_bridge_init)(struct teth_bridge_init_params *params);
+
+ int (*teth_bridge_disconnect)(enum ipa_client_type client);
+
+ int (*teth_bridge_connect)(
+ struct teth_bridge_connect_params *connect_params);
+
+ void (*ipa_set_client)(
+ int index, enum ipacm_client_enum client, bool uplink);
+
+ enum ipacm_client_enum (*ipa_get_client)(int pipe_idx);
+
+ bool (*ipa_get_client_uplink)(int pipe_idx);
+
+ int (*ipa_dma_init)(void);
+
+ int (*ipa_dma_enable)(void);
+
+ int (*ipa_dma_disable)(void);
+
+ int (*ipa_dma_sync_memcpy)(u64 dest, u64 src, int len);
+
+ int (*ipa_dma_async_memcpy)(u64 dest, u64 src, int len,
+ void (*user_cb)(void *user1), void *user_param);
+
+ int (*ipa_dma_uc_memcpy)(phys_addr_t dest, phys_addr_t src, int len);
+
+ void (*ipa_dma_destroy)(void);
+
+ bool (*ipa_has_open_aggr_frame)(enum ipa_client_type client);
+
+ int (*ipa_generate_tag_process)(void);
+
+ int (*ipa_disable_sps_pipe)(enum ipa_client_type client);
+
+ void (*ipa_set_tag_process_before_gating)(bool val);
+
+ int (*ipa_mhi_init_engine)(struct ipa_mhi_init_engine *params);
+
+ int (*ipa_connect_mhi_pipe)(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl);
+
+ int (*ipa_disconnect_mhi_pipe)(u32 clnt_hdl);
+
+ bool (*ipa_mhi_stop_gsi_channel)(enum ipa_client_type client);
+
+ int (*ipa_qmi_disable_force_clear)(u32 request_id);
+
+ int (*ipa_qmi_enable_force_clear_datapath_send)(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+ int (*ipa_qmi_disable_force_clear_datapath_send)(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+ bool (*ipa_mhi_sps_channel_empty)(enum ipa_client_type client);
+
+ int (*ipa_mhi_reset_channel_internal)(enum ipa_client_type client);
+
+ int (*ipa_mhi_start_channel_internal)(enum ipa_client_type client);
+
+ void (*ipa_get_holb)(int ep_idx, struct ipa_ep_cfg_holb *holb);
+
+ int (*ipa_mhi_query_ch_info)(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info);
+
+ int (*ipa_mhi_resume_channels_internal)(
+ enum ipa_client_type client,
+ bool LPTransitionRejected,
+ bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch,
+ u8 index);
+
+ int (*ipa_mhi_destroy_channel)(enum ipa_client_type client);
+
+ int (*ipa_uc_mhi_send_dl_ul_sync_info)
+ (union IpaHwMhiDlUlSyncCmdData_t *cmd);
+
+ int (*ipa_uc_mhi_init)
+ (void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+
+ void (*ipa_uc_mhi_cleanup)(void);
+
+ int (*ipa_uc_mhi_print_stats)(char *dbg_buff, int size);
+
+ int (*ipa_uc_mhi_reset_channel)(int channelHandle);
+
+ int (*ipa_uc_mhi_suspend_channel)(int channelHandle);
+
+ int (*ipa_uc_mhi_stop_event_update_channel)(int channelHandle);
+
+ int (*ipa_uc_state_check)(void);
+
+ int (*ipa_write_qmap_id)(struct ipa_ioc_write_qmapid *param_in);
+
+ int (*ipa_add_interrupt_handler)(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data);
+
+ int (*ipa_remove_interrupt_handler)(enum ipa_irq_type interrupt);
+
+ int (*ipa_restore_suspend_handler)(void);
+
+ void (*ipa_bam_reg_dump)(void);
+
+ int (*ipa_get_ep_mapping)(enum ipa_client_type client);
+
+ bool (*ipa_is_ready)(void);
+
+ void (*ipa_proxy_clk_vote)(void);
+
+ void (*ipa_proxy_clk_unvote)(void);
+
+ bool (*ipa_is_client_handle_valid)(u32 clnt_hdl);
+
+ enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx);
+
+ enum ipa_rm_resource_name (*ipa_get_rm_resource_from_ep)(int pipe_idx);
+
+ bool (*ipa_get_modem_cfg_emb_pipe_flt)(void);
+
+ enum ipa_transport_type (*ipa_get_transport_type)(void);
+
+ int (*ipa_ap_suspend)(struct device *dev);
+
+ int (*ipa_ap_resume)(struct device *dev);
+
+ int (*ipa_stop_gsi_channel)(u32 clnt_hdl);
+
+ struct iommu_domain *(*ipa_get_smmu_domain)(void);
+
+ int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size,
+ uint32_t agg_count);
+
+ struct device *(*ipa_get_dma_dev)(void);
+
+ int (*ipa_release_wdi_mapping)(u32 num_buffers,
+ struct ipa_wdi_buffer_info *info);
+
+ int (*ipa_create_wdi_mapping)(u32 num_buffers,
+ struct ipa_wdi_buffer_info *info);
+
+ struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)(int ipa_ep_idx);
+
+ int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data),
+ void *user_data);
+
+ void (*ipa_inc_client_enable_clks)(
+ struct ipa_active_client_logging_info *id);
+
+ void (*ipa_dec_client_disable_clks)(
+ struct ipa_active_client_logging_info *id);
+
+ int (*ipa_inc_client_enable_clks_no_block)(
+ struct ipa_active_client_logging_info *id);
+
+ int (*ipa_suspend_resource_no_block)(
+ enum ipa_rm_resource_name resource);
+
+ int (*ipa_resume_resource)(enum ipa_rm_resource_name name);
+
+ int (*ipa_suspend_resource_sync)(enum ipa_rm_resource_name resource);
+
+ int (*ipa_set_required_perf_profile)(
+ enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps);
+
+ void *(*ipa_get_ipc_logbuf)(void);
+
+ void *(*ipa_get_ipc_logbuf_low)(void);
+
+ int (*ipa_rx_poll)(u32 clnt_hdl, int budget);
+
+ void (*ipa_recycle_wan_skb)(struct sk_buff *skb);
+
+ int (*ipa_setup_uc_ntn_pipes)(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *);
+
+ int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl);
+};
+
+#ifdef CONFIG_IPA
+int ipa_plat_drv_probe(struct platform_device *pdev_p,
+ struct ipa_api_controller *api_ctrl,
+ const struct of_device_id *pdrv_match);
+#else
+static inline int ipa_plat_drv_probe(struct platform_device *pdev_p,
+ struct ipa_api_controller *api_ctrl,
+ const struct of_device_id *pdrv_match)
+{
+ return -ENODEV;
+}
+#endif /* (CONFIG_IPA) */
+
+#ifdef CONFIG_IPA3
+int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+ struct ipa_api_controller *api_ctrl,
+ const struct of_device_id *pdrv_match);
+#else
+static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+ struct ipa_api_controller *api_ctrl,
+ const struct of_device_id *pdrv_match)
+{
+ return -ENODEV;
+}
+#endif /* (CONFIG_IPA3) */
+
+#endif /* _IPA_API_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
new file mode 100644
index 0000000..61cef2d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
+obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
new file mode 100644
index 0000000..6addf14
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -0,0 +1,2609 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/msm_gsi.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <linux/ipa_mhi.h>
+#include "../ipa_common_i.h"
+
+#define IPA_MHI_DRV_NAME "ipa_mhi_client"
+#define IPA_MHI_DBG(fmt, args...) \
+ pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args)
+#define IPA_MHI_ERR(fmt, args...) \
+ pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPA_MHI_FUNC_ENTRY() \
+ IPA_MHI_DBG("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+ IPA_MHI_DBG("EXIT\n")
+
+#define IPA_MHI_RM_TIMEOUT_MSEC 10000
+#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10
+
+#define IPA_MHI_SUSPEND_SLEEP_MIN 900
+#define IPA_MHI_SUSPEND_SLEEP_MAX 1100
+
+#define IPA_MHI_MAX_UL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 1
+
+#if (IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) > \
+ (IPA_MHI_GSI_ER_END - IPA_MHI_GSI_ER_START)
+#error not enough event rings for MHI
+#endif
+
+/* bit #40 in the address should be asserted for MHI transfers over PCIe */
+#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
+ ((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
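+
+/*
+ * Illustrative effect of the macro above (assuming IPA_MHI_HOST_ADDR()
+ * simply sets bit 40, as the comment states): with assert_bit40 enabled,
+ * a host address of 0x1234 becomes 0x10000001234 before being handed to
+ * HW; with assert_bit40 disabled the address is passed through unchanged.
+ */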
+
+enum ipa_mhi_rm_state {
+ IPA_MHI_RM_STATE_RELEASED,
+ IPA_MHI_RM_STATE_REQUESTED,
+ IPA_MHI_RM_STATE_GRANTED,
+ IPA_MHI_RM_STATE_MAX
+};
+
+enum ipa_mhi_state {
+ IPA_MHI_STATE_INITIALIZED,
+ IPA_MHI_STATE_READY,
+ IPA_MHI_STATE_STARTED,
+ IPA_MHI_STATE_SUSPEND_IN_PROGRESS,
+ IPA_MHI_STATE_SUSPENDED,
+ IPA_MHI_STATE_RESUME_IN_PROGRESS,
+ IPA_MHI_STATE_MAX
+};
+
+static char *ipa_mhi_state_str[] = {
+ __stringify(IPA_MHI_STATE_INITIALIZED),
+ __stringify(IPA_MHI_STATE_READY),
+ __stringify(IPA_MHI_STATE_STARTED),
+ __stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS),
+ __stringify(IPA_MHI_STATE_SUSPENDED),
+ __stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS),
+};
+
+#define MHI_STATE_STR(state) \
+ (((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \
+ ipa_mhi_state_str[(state)] : \
+ "INVALID")
+
+enum ipa_mhi_dma_dir {
+ IPA_MHI_DMA_TO_HOST,
+ IPA_MHI_DMA_FROM_HOST,
+};
+
+/**
+ * struct ipa_mhi_channel_ctx - MHI Channel context
+ * @valid: entry is valid
+ * @id: MHI channel ID
+ * @hdl: channel handle for uC
+ * @client: IPA Client
+ * @state: Channel state
+ */
+struct ipa_mhi_channel_ctx {
+ bool valid;
+ u8 id;
+ u8 index;
+ enum ipa_client_type client;
+ enum ipa_hw_mhi_channel_states state;
+ bool stop_in_proc;
+ struct gsi_chan_info ch_info;
+ u64 channel_context_addr;
+ struct ipa_mhi_ch_ctx ch_ctx_host;
+ u64 event_context_addr;
+ struct ipa_mhi_ev_ctx ev_ctx_host;
+ bool brstmode_enabled;
+ union __packed gsi_channel_scratch ch_scratch;
+ unsigned long cached_gsi_evt_ring_hdl;
+};
+
+struct ipa_mhi_client_ctx {
+ enum ipa_mhi_state state;
+ spinlock_t state_lock;
+ mhi_client_cb cb_notify;
+ void *cb_priv;
+ struct completion rm_prod_granted_comp;
+ enum ipa_mhi_rm_state rm_cons_state;
+ struct completion rm_cons_comp;
+ bool trigger_wakeup;
+ bool wakeup_notified;
+ struct workqueue_struct *wq;
+ struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS];
+ struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS];
+ u32 total_channels;
+ struct ipa_mhi_msi_info msi;
+ u32 mmio_addr;
+ u32 first_ch_idx;
+ u32 first_er_idx;
+ u32 host_ctrl_addr;
+ u32 host_data_addr;
+ u64 channel_context_array_addr;
+ u64 event_context_array_addr;
+ u32 qmi_req_id;
+ u32 use_ipadma;
+ bool assert_bit40;
+ bool test_mode;
+};
+
+static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+#define IPA_MHI_MAX_MSG_LEN 512
+static char dbg_buff[IPA_MHI_MAX_MSG_LEN];
+static struct dentry *dent;
+
+static char *ipa_mhi_channel_state_str[] = {
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_RUN),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_STOP),
+ __stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR),
+};
+
+#define MHI_CH_STATE_STR(state) \
+ (((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \
+ ipa_mhi_channel_state_str[(state)] : \
+ "INVALID")
+
+static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
+ u64 host_addr, int size)
+{
+ struct ipa_mem_buffer mem;
+ int res;
+ struct device *pdev;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (ipa_mhi_client_ctx->use_ipadma) {
+ pdev = ipa_get_dma_dev();
+ host_addr = IPA_MHI_CLIENT_HOST_ADDR_COND(host_addr);
+
+ mem.size = size;
+ mem.base = dma_alloc_coherent(pdev, mem.size,
+ &mem.phys_base, GFP_KERNEL);
+ if (!mem.base) {
+ IPA_MHI_ERR(
+ "dma_alloc_coherent failed, DMA buff size %d\n"
+ , mem.size);
+ return -ENOMEM;
+ }
+
+ if (dir == IPA_MHI_DMA_FROM_HOST) {
+ res = ipa_dma_sync_memcpy(mem.phys_base, host_addr,
+ size);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_dma_sync_memcpy from host fail%d\n"
+ , res);
+ goto fail_memcopy;
+ }
+ memcpy(dev_addr, mem.base, size);
+ } else {
+ memcpy(mem.base, dev_addr, size);
+ res = ipa_dma_sync_memcpy(host_addr, mem.phys_base,
+ size);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_dma_sync_memcpy to host fail %d\n"
+ , res);
+ goto fail_memcopy;
+ }
+ }
+ dma_free_coherent(pdev, mem.size, mem.base,
+ mem.phys_base);
+ } else {
+ void *host_ptr;
+
+ if (!ipa_mhi_client_ctx->test_mode)
+ host_ptr = ioremap(host_addr, size);
+ else
+ host_ptr = phys_to_virt(host_addr);
+ if (!host_ptr) {
+ IPA_MHI_ERR("ioremap failed for 0x%llx\n", host_addr);
+ return -EFAULT;
+ }
+ if (dir == IPA_MHI_DMA_FROM_HOST)
+ memcpy(dev_addr, host_ptr, size);
+ else
+ memcpy(host_ptr, dev_addr, size);
+ if (!ipa_mhi_client_ctx->test_mode)
+ iounmap(host_ptr);
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_memcopy:
+ dma_free_coherent(ipa_get_dma_dev(), mem.size, mem.base,
+ mem.phys_base);
+ return res;
+}
+
+static int ipa_mhi_print_channel_info(struct ipa_mhi_channel_ctx *channel,
+ char *buff, int len)
+{
+ int nbytes = 0;
+
+ if (channel->valid) {
+ nbytes += scnprintf(&buff[nbytes],
+ len - nbytes,
+ "channel idx=%d ch_id=%d client=%d state=%s\n",
+ channel->index, channel->id, channel->client,
+ MHI_CH_STATE_STR(channel->state));
+
+ nbytes += scnprintf(&buff[nbytes],
+ len - nbytes,
+ " ch_ctx=%llx\n",
+ channel->channel_context_addr);
+
+ nbytes += scnprintf(&buff[nbytes],
+ len - nbytes,
+ " gsi_evt_ring_hdl=%ld ev_ctx=%llx\n",
+ channel->cached_gsi_evt_ring_hdl,
+ channel->event_context_addr);
+ }
+ return nbytes;
+}
+
+static int ipa_mhi_print_host_channel_ctx_info(
+ struct ipa_mhi_channel_ctx *channel, char *buff, int len)
+{
+ int res, nbytes = 0;
+ struct ipa_mhi_ch_ctx ch_ctx_host;
+
+ memset(&ch_ctx_host, 0, sizeof(ch_ctx_host));
+
+ /* reading ch context from host */
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+ &ch_ctx_host, channel->channel_context_addr,
+ sizeof(ch_ctx_host));
+ if (res) {
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "Failed to read from host %d\n", res);
+ return nbytes;
+ }
+
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "ch_id: %d\n", channel->id);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "chstate: 0x%x\n", ch_ctx_host.chstate);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "brstmode: 0x%x\n", ch_ctx_host.brstmode);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "chtype: 0x%x\n", ch_ctx_host.chtype);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "erindex: 0x%x\n", ch_ctx_host.erindex);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "rbase: 0x%llx\n", ch_ctx_host.rbase);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "rlen: 0x%llx\n", ch_ctx_host.rlen);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "rp: 0x%llx\n", ch_ctx_host.rp);
+ nbytes += scnprintf(&buff[nbytes], len - nbytes,
+ "wp: 0x%llx\n", ch_ctx_host.wp);
+
+ return nbytes;
+}
+
+static ssize_t ipa_mhi_debugfs_stats(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int i;
+ struct ipa_mhi_channel_ctx *channel;
+
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "IPA MHI state: %s\n",
+ MHI_STATE_STR(ipa_mhi_client_ctx->state));
+
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->ul_channels[i];
+ nbytes += ipa_mhi_print_channel_info(channel,
+ &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+ }
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->dl_channels[i];
+ nbytes += ipa_mhi_print_channel_info(channel,
+ &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+
+ nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN);
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int i, nbytes = 0;
+ struct ipa_mhi_channel_ctx *channel;
+
+ if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_INITIALIZED ||
+ ipa_mhi_client_ctx->state == IPA_MHI_STATE_READY) {
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "Cannot dump host channel context ");
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "before IPA MHI was STARTED\n");
+ return simple_read_from_buffer(ubuf, count, ppos,
+ dbg_buff, nbytes);
+ }
+ if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "IPA MHI is suspended, cannot dump channel ctx array");
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ " from host -PCIe can be in D3 state\n");
+ return simple_read_from_buffer(ubuf, count, ppos,
+ dbg_buff, nbytes);
+ }
+
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "channel contex array - dump from host\n");
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "***** UL channels *******\n");
+
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->ul_channels[i];
+ if (!channel->valid)
+ continue;
+ nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
+ &dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes);
+ }
+
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPA_MHI_MAX_MSG_LEN - nbytes,
+ "\n***** DL channels *******\n");
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->dl_channels[i];
+ if (!channel->valid)
+ continue;
+ nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
+ &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+const struct file_operations ipa_mhi_stats_ops = {
+ .read = ipa_mhi_debugfs_stats,
+};
+
+const struct file_operations ipa_mhi_uc_stats_ops = {
+ .read = ipa_mhi_debugfs_uc_stats,
+};
+
+const struct file_operations ipa_mhi_dump_host_ch_ctx_ops = {
+ .read = ipa_mhi_debugfs_dump_host_ch_ctx_arr,
+};
+
+
+static void ipa_mhi_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP;
+ struct dentry *file;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ dent = debugfs_create_dir("ipa_mhi", NULL);
+ if (IS_ERR(dent)) {
+ IPA_MHI_ERR("fail to create folder ipa_mhi\n");
+ return;
+ }
+
+ file = debugfs_create_file("stats", read_only_mode, dent,
+ 0, &ipa_mhi_stats_ops);
+ if (!file || IS_ERR(file)) {
+ IPA_MHI_ERR("fail to create file stats\n");
+ goto fail;
+ }
+
+ file = debugfs_create_file("uc_stats", read_only_mode, dent,
+ 0, &ipa_mhi_uc_stats_ops);
+ if (!file || IS_ERR(file)) {
+ IPA_MHI_ERR("fail to create file uc_stats\n");
+ goto fail;
+ }
+
+ file = debugfs_create_u32("use_ipadma", read_write_mode, dent,
+ &ipa_mhi_client_ctx->use_ipadma);
+ if (!file || IS_ERR(file)) {
+ IPA_MHI_ERR("fail to create file use_ipadma\n");
+ goto fail;
+ }
+
+ file = debugfs_create_file("dump_host_channel_ctx_array",
+ read_only_mode, dent, NULL, &ipa_mhi_dump_host_ch_ctx_ops);
+ if (!file || IS_ERR(file)) {
+ IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n");
+ goto fail;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return;
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+static void ipa_mhi_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(dent);
+}
+
+#else
+static void ipa_mhi_debugfs_init(void) {}
+static void ipa_mhi_debugfs_destroy(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+static union IpaHwMhiDlUlSyncCmdData_t ipa_cached_dl_ul_sync_info;
+
+static void ipa_mhi_wq_notify_wakeup(struct work_struct *work);
+static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup);
+
+static void ipa_mhi_wq_notify_ready(struct work_struct *work);
+static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready);
+
+/**
+ * ipa_mhi_notify_wakeup() - Schedule work to notify data available
+ *
+ * This function will schedule a work to notify data available event.
+ * In case this function is called more than once, only one notification will
+ * be sent to MHI client driver. No further notifications will be sent until
+ * IPA MHI state will become STARTED.
+ */
+static void ipa_mhi_notify_wakeup(void)
+{
+ IPA_MHI_FUNC_ENTRY();
+ if (ipa_mhi_client_ctx->wakeup_notified) {
+ IPA_MHI_DBG("wakeup already called\n");
+ return;
+ }
+ queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_wakeup_work);
+ ipa_mhi_client_ctx->wakeup_notified = true;
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_rm_cons_request() - callback function for IPA RM request resource
+ *
+ * In case IPA MHI is not suspended, MHI CONS will be granted immediately.
+ * In case IPA MHI is suspended, MHI CONS will be granted after resume.
+ */
+static int ipa_mhi_rm_cons_request(void)
+{
+ unsigned long flags;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED;
+ if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_STARTED) {
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
+ res = 0;
+ } else if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
+ ipa_mhi_notify_wakeup();
+ res = -EINPROGRESS;
+ } else if (ipa_mhi_client_ctx->state ==
+ IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
+ /* wakeup event will be trigger after suspend finishes */
+ ipa_mhi_client_ctx->trigger_wakeup = true;
+ res = -EINPROGRESS;
+ } else {
+ res = -EINPROGRESS;
+ }
+
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+ IPA_MHI_DBG("EXIT with %d\n", res);
+ return res;
+}
+
+static int ipa_mhi_rm_cons_release(void)
+{
+ unsigned long flags;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
+ complete_all(&ipa_mhi_client_ctx->rm_cons_comp);
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ IPA_MHI_FUNC_ENTRY();
+
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n");
+ complete_all(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+ break;
+
+ case IPA_RM_RESOURCE_RELEASED:
+ IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n");
+ break;
+
+ default:
+ IPA_MHI_ERR("unexpected event %d\n", event);
+ WARN_ON(1);
+ break;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
+ *
+ * This function is called from IPA MHI workqueue to notify
+ * MHI client driver on data available event.
+ */
+static void ipa_mhi_wq_notify_wakeup(struct work_struct *work)
+{
+ IPA_MHI_FUNC_ENTRY();
+ ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
+ IPA_MHI_EVENT_DATA_AVAILABLE, 0);
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_wq_notify_ready() - Notify MHI client on ready
+ *
+ * This function is called from IPA MHI workqueue to notify
+ * MHI client driver on ready event when IPA uC is loaded
+ */
+static void ipa_mhi_wq_notify_ready(struct work_struct *work)
+{
+ IPA_MHI_FUNC_ENTRY();
+ ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
+ IPA_MHI_EVENT_READY, 0);
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_notify_ready() - Schedule work to notify ready
+ *
+ * This function will schedule a work to notify ready event.
+ */
+static void ipa_mhi_notify_ready(void)
+{
+ IPA_MHI_FUNC_ENTRY();
+ queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_ready_work);
+ IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_set_state() - Set new state to IPA MHI
+ * @new_state: new state
+ *
+ * Sets a new IPA MHI state if the transition is allowed by the IPA MHI state
+ * machine. Some state transitions also trigger a wakeup request.
+ *
+ * Returns: 0 on success, -EPERM if the transition is not allowed
+ */
+static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
+{
+ unsigned long flags;
+ int res = -EPERM;
+
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ IPA_MHI_DBG("Current state: %s\n",
+ MHI_STATE_STR(ipa_mhi_client_ctx->state));
+
+ switch (ipa_mhi_client_ctx->state) {
+ case IPA_MHI_STATE_INITIALIZED:
+ if (new_state == IPA_MHI_STATE_READY) {
+ ipa_mhi_notify_ready();
+ res = 0;
+ }
+ break;
+
+ case IPA_MHI_STATE_READY:
+ if (new_state == IPA_MHI_STATE_READY)
+ res = 0;
+ if (new_state == IPA_MHI_STATE_STARTED)
+ res = 0;
+ break;
+
+ case IPA_MHI_STATE_STARTED:
+ if (new_state == IPA_MHI_STATE_INITIALIZED)
+ res = 0;
+ else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
+ res = 0;
+ break;
+
+ case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
+ if (new_state == IPA_MHI_STATE_SUSPENDED) {
+ if (ipa_mhi_client_ctx->trigger_wakeup) {
+ ipa_mhi_client_ctx->trigger_wakeup = false;
+ ipa_mhi_notify_wakeup();
+ }
+ res = 0;
+ } else if (new_state == IPA_MHI_STATE_STARTED) {
+ ipa_mhi_client_ctx->wakeup_notified = false;
+ ipa_mhi_client_ctx->trigger_wakeup = false;
+ if (ipa_mhi_client_ctx->rm_cons_state ==
+ IPA_MHI_RM_STATE_REQUESTED) {
+ ipa_rm_notify_completion(
+ IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_MHI_CONS);
+ ipa_mhi_client_ctx->rm_cons_state =
+ IPA_MHI_RM_STATE_GRANTED;
+ }
+ res = 0;
+ }
+ break;
+
+ case IPA_MHI_STATE_SUSPENDED:
+ if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS)
+ res = 0;
+ break;
+
+ case IPA_MHI_STATE_RESUME_IN_PROGRESS:
+ if (new_state == IPA_MHI_STATE_SUSPENDED) {
+ if (ipa_mhi_client_ctx->trigger_wakeup) {
+ ipa_mhi_client_ctx->trigger_wakeup = false;
+ ipa_mhi_notify_wakeup();
+ }
+ res = 0;
+ } else if (new_state == IPA_MHI_STATE_STARTED) {
+ ipa_mhi_client_ctx->trigger_wakeup = false;
+ ipa_mhi_client_ctx->wakeup_notified = false;
+ if (ipa_mhi_client_ctx->rm_cons_state ==
+ IPA_MHI_RM_STATE_REQUESTED) {
+ ipa_rm_notify_completion(
+ IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_MHI_CONS);
+ ipa_mhi_client_ctx->rm_cons_state =
+ IPA_MHI_RM_STATE_GRANTED;
+ }
+ res = 0;
+ }
+ break;
+
+ default:
+ IPA_MHI_ERR("Invalid state %d\n", ipa_mhi_client_ctx->state);
+ WARN_ON(1);
+ }
+
+ if (res)
+ IPA_MHI_ERR("Invalid state change to %s\n",
+ MHI_STATE_STR(new_state));
+ else {
+ IPA_MHI_DBG("New state change to %s\n",
+ MHI_STATE_STR(new_state));
+ ipa_mhi_client_ctx->state = new_state;
+ }
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+ return res;
+}
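The transition table above is easier to audit when restated as a pure
predicate. The helper below is an illustrative sketch only (it does not exist
in the driver) and omits the side effects of each transition (wakeup
notification, RM grant completion):

	static bool example_transition_is_legal(enum ipa_mhi_state cur,
		enum ipa_mhi_state next)
	{
		switch (cur) {
		case IPA_MHI_STATE_INITIALIZED:
			return next == IPA_MHI_STATE_READY;
		case IPA_MHI_STATE_READY:
			return next == IPA_MHI_STATE_READY ||
				next == IPA_MHI_STATE_STARTED;
		case IPA_MHI_STATE_STARTED:
			return next == IPA_MHI_STATE_INITIALIZED ||
				next == IPA_MHI_STATE_SUSPEND_IN_PROGRESS;
		case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
		case IPA_MHI_STATE_RESUME_IN_PROGRESS:
			return next == IPA_MHI_STATE_SUSPENDED ||
				next == IPA_MHI_STATE_STARTED;
		case IPA_MHI_STATE_SUSPENDED:
			return next == IPA_MHI_STATE_RESUME_IN_PROGRESS;
		default:
			return false;
		}
	}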
+
+static void ipa_mhi_uc_ready_cb(void)
+{
+ IPA_MHI_FUNC_ENTRY();
+ ipa_mhi_set_state(IPA_MHI_STATE_READY);
+ IPA_MHI_FUNC_EXIT();
+}
+
+static void ipa_mhi_uc_wakeup_request_cb(void)
+{
+ unsigned long flags;
+
+ IPA_MHI_FUNC_ENTRY();
+ IPA_MHI_DBG("MHI state: %s\n",
+ MHI_STATE_STR(ipa_mhi_client_ctx->state));
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED)
+ ipa_mhi_notify_wakeup();
+ else if (ipa_mhi_client_ctx->state ==
+ IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
+ /* wakeup event will be triggered after suspend finishes */
+ ipa_mhi_client_ctx->trigger_wakeup = true;
+
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+ IPA_MHI_FUNC_EXIT();
+}
+
+static int ipa_mhi_request_prod(void)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ reinit_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+ IPA_MHI_DBG("requesting mhi prod\n");
+ res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
+ if (res) {
+ if (res != -EINPROGRESS) {
+ IPA_MHI_ERR("failed to request mhi prod %d\n", res);
+ return res;
+ }
+ res = wait_for_completion_timeout(
+ &ipa_mhi_client_ctx->rm_prod_granted_comp,
+ msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
+ if (res == 0) {
+ IPA_MHI_ERR("timeout request mhi prod\n");
+ return -ETIME;
+ }
+ }
+
+ IPA_MHI_DBG("mhi prod granted\n");
+ IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_release_prod(void)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
+
+ IPA_MHI_FUNC_EXIT();
+	return res;
+}
+
+/**
+ * ipa_mhi_start() - Start IPA MHI engine
+ * @params: pcie addresses for MHI
+ *
+ * This function is called by MHI client driver on MHI engine start for
+ * handling MHI accelerated channels. This function is called after
+ * ipa_mhi_init() was called and can be called after MHI reset to restart MHI
+ * engine. When this function returns device can move to M0 state.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_start(struct ipa_mhi_start_params *params)
+{
+ int res;
+ struct ipa_mhi_init_engine init_params;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!params) {
+ IPA_MHI_ERR("null args\n");
+ return -EINVAL;
+ }
+
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("not initialized\n");
+ return -EPERM;
+ }
+
+ res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state %d\n", res);
+ return res;
+ }
+
+ ipa_mhi_client_ctx->host_ctrl_addr = params->host_ctrl_addr;
+ ipa_mhi_client_ctx->host_data_addr = params->host_data_addr;
+ ipa_mhi_client_ctx->channel_context_array_addr =
+ params->channel_context_array_addr;
+ ipa_mhi_client_ctx->event_context_array_addr =
+ params->event_context_array_addr;
+ IPA_MHI_DBG("host_ctrl_addr 0x%x\n",
+ ipa_mhi_client_ctx->host_ctrl_addr);
+ IPA_MHI_DBG("host_data_addr 0x%x\n",
+ ipa_mhi_client_ctx->host_data_addr);
+ IPA_MHI_DBG("channel_context_array_addr 0x%llx\n",
+ ipa_mhi_client_ctx->channel_context_array_addr);
+ IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
+ ipa_mhi_client_ctx->event_context_array_addr);
+
+ /* Add MHI <-> Q6 dependencies to IPA RM */
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res && res != -EINPROGRESS) {
+ IPA_MHI_ERR("failed to add dependency %d\n", res);
+ goto fail_add_mhi_q6_dep;
+ }
+
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_MHI_CONS);
+ if (res && res != -EINPROGRESS) {
+ IPA_MHI_ERR("failed to add dependency %d\n", res);
+ goto fail_add_q6_mhi_dep;
+ }
+
+ res = ipa_mhi_request_prod();
+ if (res) {
+ IPA_MHI_ERR("failed request prod %d\n", res);
+ goto fail_request_prod;
+ }
+
+	memset(&init_params, 0, sizeof(init_params));
+
+	/* gsi params */
+ init_params.gsi.first_ch_idx =
+ ipa_mhi_client_ctx->first_ch_idx;
+ /* uC params */
+ init_params.uC.first_ch_idx =
+ ipa_mhi_client_ctx->first_ch_idx;
+ init_params.uC.first_er_idx =
+ ipa_mhi_client_ctx->first_er_idx;
+ init_params.uC.host_ctrl_addr = params->host_ctrl_addr;
+ init_params.uC.host_data_addr = params->host_data_addr;
+ init_params.uC.mmio_addr = ipa_mhi_client_ctx->mmio_addr;
+ init_params.uC.msi = &ipa_mhi_client_ctx->msi;
+ init_params.uC.ipa_cached_dl_ul_sync_info =
+ &ipa_cached_dl_ul_sync_info;
+
+ res = ipa_mhi_init_engine(&init_params);
+ if (res) {
+ IPA_MHI_ERR("IPA core failed to start MHI %d\n", res);
+ goto fail_init_engine;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_init_engine:
+ ipa_mhi_release_prod();
+fail_request_prod:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_MHI_CONS);
+fail_add_q6_mhi_dep:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+fail_add_mhi_q6_dep:
+ ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
+ return res;
+}
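As a usage sketch, an MHI client driver populates the start parameters with
the host addresses it negotiated and calls ipa_mhi_start() after
ipa_mhi_init(). The helper and the address values below are illustrative
only; a real client would call this only after receiving the READY event
from the notify callback registered in ipa_mhi_init():

	static int example_start_ipa_mhi(void)
	{
		struct ipa_mhi_start_params start_params;

		memset(&start_params, 0, sizeof(start_params));
		/* illustrative host PCIe addresses */
		start_params.host_ctrl_addr = 0x1000;
		start_params.host_data_addr = 0x2000;
		start_params.channel_context_array_addr = 0x10000000ULL;
		start_params.event_context_array_addr = 0x10001000ULL;

		return ipa_mhi_start(&start_params);
	}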
+
+/**
+ * ipa_mhi_get_channel_context() - Get corresponding channel context
+ * @client: IPA client type for the channel
+ * @channel_id: MHI channel ID
+ *
+ * This function will return the corresponding channel context, or allocate a
+ * new one in case a context for this channel does not yet exist.
+ */
+static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context(
+ enum ipa_client_type client, u8 channel_id)
+{
+ int ch_idx;
+ struct ipa_mhi_channel_ctx *channels;
+ int max_channels;
+
+ if (IPA_CLIENT_IS_PROD(client)) {
+ channels = ipa_mhi_client_ctx->ul_channels;
+ max_channels = IPA_MHI_MAX_UL_CHANNELS;
+ } else {
+ channels = ipa_mhi_client_ctx->dl_channels;
+ max_channels = IPA_MHI_MAX_DL_CHANNELS;
+ }
+
+ /* find the channel context according to channel id */
+ for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
+ if (channels[ch_idx].valid &&
+ channels[ch_idx].id == channel_id)
+ return &channels[ch_idx];
+ }
+
+	/* channel context does not exist, allocate a new one */
+ for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
+ if (!channels[ch_idx].valid)
+ break;
+ }
+
+ if (ch_idx == max_channels) {
+ IPA_MHI_ERR("no more channels available\n");
+ return NULL;
+ }
+
+ channels[ch_idx].valid = true;
+ channels[ch_idx].id = channel_id;
+ channels[ch_idx].index = ipa_mhi_client_ctx->total_channels++;
+ channels[ch_idx].client = client;
+ channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
+
+ return &channels[ch_idx];
+}
+
+/**
+ * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel
+ * context
+ * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe()
+ *
+ * This function will return the corresponding channel context, or NULL if
+ * the channel does not exist.
+ */
+static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl(
+ u32 clnt_hdl)
+{
+ int ch_idx;
+
+ for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
+ if (ipa_mhi_client_ctx->ul_channels[ch_idx].valid &&
+ ipa_get_ep_mapping(
+ ipa_mhi_client_ctx->ul_channels[ch_idx].client)
+ == clnt_hdl)
+ return &ipa_mhi_client_ctx->ul_channels[ch_idx];
+ }
+
+ for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
+ if (ipa_mhi_client_ctx->dl_channels[ch_idx].valid &&
+ ipa_get_ep_mapping(
+ ipa_mhi_client_ctx->dl_channels[ch_idx].client)
+ == clnt_hdl)
+ return &ipa_mhi_client_ctx->dl_channels[ch_idx];
+ }
+
+ return NULL;
+}
+
+static void ipa_mhi_dump_ch_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+ IPA_MHI_DBG("ch_id %d\n", channel->id);
+ IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate);
+ IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode);
+ IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg);
+ IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype);
+ IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex);
+ IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase);
+ IPA_MHI_DBG("rlen 0x%llx\n", channel->ch_ctx_host.rlen);
+ IPA_MHI_DBG("rp 0x%llx\n", channel->ch_ctx_host.rp);
+ IPA_MHI_DBG("wp 0x%llx\n", channel->ch_ctx_host.wp);
+}
+
+static void ipa_mhi_dump_ev_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+ IPA_MHI_DBG("ch_id %d event id %d\n", channel->id,
+ channel->ch_ctx_host.erindex);
+
+ IPA_MHI_DBG("intmodc 0x%x\n", channel->ev_ctx_host.intmodc);
+ IPA_MHI_DBG("intmodt 0x%x\n", channel->ev_ctx_host.intmodt);
+ IPA_MHI_DBG("ertype 0x%x\n", channel->ev_ctx_host.ertype);
+ IPA_MHI_DBG("msivec 0x%x\n", channel->ev_ctx_host.msivec);
+ IPA_MHI_DBG("rbase 0x%llx\n", channel->ev_ctx_host.rbase);
+ IPA_MHI_DBG("rlen 0x%llx\n", channel->ev_ctx_host.rlen);
+ IPA_MHI_DBG("rp 0x%llx\n", channel->ev_ctx_host.rp);
+ IPA_MHI_DBG("wp 0x%llx\n", channel->ev_ctx_host.wp);
+}
+
+static int ipa_mhi_read_ch_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+ int res;
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+ &channel->ch_ctx_host, channel->channel_context_addr,
+ sizeof(channel->ch_ctx_host));
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+		return res;
+	}
+ ipa_mhi_dump_ch_ctx(channel);
+
+ channel->event_context_addr =
+ ipa_mhi_client_ctx->event_context_array_addr +
+ channel->ch_ctx_host.erindex * sizeof(struct ipa_mhi_ev_ctx);
+ IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id,
+ channel->event_context_addr);
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+ &channel->ev_ctx_host, channel->event_context_addr,
+ sizeof(channel->ev_ctx_host));
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+		return res;
+	}
+ ipa_mhi_dump_ev_ctx(channel);
+
+ return 0;
+}
+
+static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify)
+{
+ struct ipa_mhi_channel_ctx *channel = notify->user_data;
+
+ IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
+ channel->id, channel->client, channel->state);
+ switch (notify->evt_id) {
+ case GSI_EVT_OUT_OF_BUFFERS_ERR:
+ IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+ break;
+ case GSI_EVT_OUT_OF_RESOURCES_ERR:
+ IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+ break;
+ case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+ IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+ break;
+ case GSI_EVT_EVT_RING_EMPTY_ERR:
+ IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n");
+ break;
+ default:
+ IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
+ }
+ IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
+}
+
+static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify)
+{
+ struct ipa_mhi_channel_ctx *channel = notify->chan_user_data;
+
+ IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
+ channel->id, channel->client, channel->state);
+ switch (notify->evt_id) {
+ case GSI_CHAN_INVALID_TRE_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
+ break;
+ case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+ break;
+ case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+ break;
+ case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+ break;
+ case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+ break;
+ case GSI_CHAN_HWO_1_ERR:
+ IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n");
+ break;
+ default:
+ IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
+ }
+ IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
+}
+
+static bool ipa_mhi_gsi_channel_empty(struct ipa_mhi_channel_ctx *channel)
+{
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!channel->stop_in_proc) {
+ IPA_MHI_DBG("Channel is not in STOP_IN_PROC\n");
+ return true;
+ }
+
+	if (ipa_mhi_stop_gsi_channel(channel->client)) {
+ channel->stop_in_proc = false;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ipa_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink
+ * @msecs: timeout to wait
+ *
+ * This function will poll until there are no packets pending in the uplink
+ * channels, or until the timeout expires.
+ *
+ * Return code: true - no pending packets in uplink channels
+ * false - timeout occurred
+ */
+static bool ipa_mhi_wait_for_ul_empty_timeout(unsigned int msecs)
+{
+ unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
+ unsigned long jiffies_start = jiffies;
+ bool empty = false;
+ int i;
+
+ IPA_MHI_FUNC_ENTRY();
+ while (!empty) {
+ empty = true;
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!ipa_mhi_client_ctx->ul_channels[i].valid)
+ continue;
+ if (ipa_get_transport_type() ==
+ IPA_TRANSPORT_TYPE_GSI)
+ empty &= ipa_mhi_gsi_channel_empty(
+ &ipa_mhi_client_ctx->ul_channels[i]);
+ else
+ empty &= ipa_mhi_sps_channel_empty(
+ ipa_mhi_client_ctx->ul_channels[i].client);
+ }
+
+ if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
+			IPA_MHI_DBG("timeout waiting for UL empty\n");
+ break;
+ }
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI &&
+ IPA_MHI_MAX_UL_CHANNELS == 1)
+ usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+ IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+ }
+
+ IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty");
+
+ IPA_MHI_FUNC_EXIT();
+ return empty;
+}
+
+static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source)
+{
+ struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+ int i;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ req.source_pipe_bitmask = 0;
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!ipa_mhi_client_ctx->ul_channels[i].valid)
+ continue;
+ req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping(
+ ipa_mhi_client_ctx->ul_channels[i].client);
+ }
+ if (throttle_source) {
+ req.throttle_source_valid = 1;
+ req.throttle_source = 1;
+ }
+ IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
+ req.request_id, req.source_pipe_bitmask,
+ req.throttle_source);
+ res = ipa_qmi_enable_force_clear_datapath_send(&req);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_qmi_enable_force_clear_datapath_send failed %d\n"
+ , res);
+ return res;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static int ipa_mhi_disable_force_clear(u32 request_id)
+{
+ struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ IPA_MHI_DBG("req_id=0x%x\n", req.request_id);
+ res = ipa_qmi_disable_force_clear_datapath_send(&req);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_qmi_disable_force_clear_datapath_send failed %d\n"
+ , res);
+ return res;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static void ipa_mhi_set_holb_on_dl_channels(bool enable,
+ struct ipa_ep_cfg_holb old_holb[])
+{
+ int i;
+ struct ipa_ep_cfg_holb ep_holb;
+ int ep_idx;
+ int res;
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ if (!ipa_mhi_client_ctx->dl_channels[i].valid)
+ continue;
+ if (ipa_mhi_client_ctx->dl_channels[i].state ==
+ IPA_HW_MHI_CHANNEL_STATE_INVALID)
+ continue;
+ ep_idx = ipa_get_ep_mapping(
+ ipa_mhi_client_ctx->dl_channels[i].client);
+		if (ep_idx == -1) {
+ IPA_MHI_ERR("Client %u is not mapped\n",
+ ipa_mhi_client_ctx->dl_channels[i].client);
+ ipa_assert();
+ return;
+ }
+ memset(&ep_holb, 0, sizeof(ep_holb));
+ if (enable) {
+ ipa_get_holb(ep_idx, &old_holb[i]);
+ ep_holb.en = 1;
+ ep_holb.tmr_val = 0;
+ } else {
+ ep_holb = old_holb[i];
+ }
+ res = ipa_cfg_ep_holb(ep_idx, &ep_holb);
+ if (res) {
+ IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res);
+ ipa_assert();
+ return;
+ }
+ }
+}
+
+static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel)
+{
+ int clnt_hdl;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ clnt_hdl = ipa_get_ep_mapping(channel->client);
+ if (clnt_hdl < 0)
+ return -EFAULT;
+
+ res = ipa_stop_gsi_channel(clnt_hdl);
+ if (res != 0 && res != -GSI_STATUS_AGAIN &&
+ res != -GSI_STATUS_TIMED_OUT) {
+ IPA_MHI_ERR("GSI stop channel failed %d\n", res);
+ return -EFAULT;
+ }
+
+ /* check if channel was stopped completely */
+ if (res)
+ channel->stop_in_proc = true;
+
+ IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ?
+ "STOP_IN_PROC" : "STOP");
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
+{
+ int res;
+ bool empty;
+ struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];
+
+ IPA_MHI_FUNC_ENTRY();
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ res = ipa_mhi_suspend_gsi_channel(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n",
+ res);
+ return res;
+ }
+ } else {
+ res = ipa_uc_mhi_reset_channel(channel->index);
+ if (res) {
+ IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
+ res);
+ return res;
+ }
+ }
+
+ empty = ipa_mhi_wait_for_ul_empty_timeout(
+ IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+ if (!empty) {
+ IPA_MHI_DBG("%s not empty\n",
+ (ipa_get_transport_type() ==
+ IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM");
+ res = ipa_mhi_enable_force_clear(
+ ipa_mhi_client_ctx->qmi_req_id, false);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n",
+ res);
+ ipa_assert();
+ return res;
+ }
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ empty = ipa_mhi_wait_for_ul_empty_timeout(
+ IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+
+ IPA_MHI_DBG("empty=%d\n", empty);
+ } else {
+ /* enable packet drop on all DL channels */
+ ipa_mhi_set_holb_on_dl_channels(true, old_ep_holb);
+ ipa_generate_tag_process();
+ /* disable packet drop on all DL channels */
+ ipa_mhi_set_holb_on_dl_channels(false, old_ep_holb);
+
+ res = ipa_disable_sps_pipe(channel->client);
+ if (res) {
+ IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
+ ipa_assert();
+ return res;
+ }
+ }
+
+		res = ipa_mhi_disable_force_clear(
+			ipa_mhi_client_ctx->qmi_req_id);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n",
+ res);
+ ipa_assert();
+ return res;
+ }
+ ipa_mhi_client_ctx->qmi_req_id++;
+ }
+
+ res = ipa_mhi_reset_channel_internal(channel->client);
+ if (res) {
+		IPA_MHI_ERR("ipa_mhi_reset_channel_internal failed %d\n",
+			res);
+ return res;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+}
+
+static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ res = ipa_mhi_suspend_gsi_channel(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n"
+ , res);
+ return res;
+ }
+
+ res = ipa_mhi_reset_channel_internal(channel->client);
+ if (res) {
+ IPA_MHI_ERR(
+				"ipa_mhi_reset_channel_internal failed %d\n",
+				res);
+ return res;
+ }
+ } else {
+ res = ipa_mhi_reset_channel_internal(channel->client);
+ if (res) {
+ IPA_MHI_ERR(
+				"ipa_mhi_reset_channel_internal failed %d\n",
+				res);
+ return res;
+ }
+
+ res = ipa_uc_mhi_reset_channel(channel->index);
+ if (res) {
+ IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
+ res);
+ ipa_mhi_start_channel_internal(channel->client);
+ return res;
+ }
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ if (IPA_CLIENT_IS_PROD(channel->client))
+ res = ipa_mhi_reset_ul_channel(channel);
+ else
+ res = ipa_mhi_reset_dl_channel(channel);
+ if (res) {
+ IPA_MHI_ERR("failed to reset channel error %d\n", res);
+ return res;
+ }
+
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->state, channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, chstate),
+ sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+ return res;
+ }
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/**
+ * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
+{
+ int res;
+ unsigned long flags;
+ struct ipa_mhi_channel_ctx *channel = NULL;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!in || !clnt_hdl) {
+ IPA_MHI_ERR("NULL args\n");
+ return -EINVAL;
+ }
+
+ if (in->sys.client >= IPA_CLIENT_MAX) {
+ IPA_MHI_ERR("bad param client:%d\n", in->sys.client);
+ return -EINVAL;
+ }
+
+ if (!IPA_CLIENT_IS_MHI(in->sys.client)) {
+ IPA_MHI_ERR(
+ "Invalid MHI client, client: %d\n", in->sys.client);
+ return -EINVAL;
+ }
+
+ IPA_MHI_DBG("channel=%d\n", in->channel_id);
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("IPA MHI was not initialized\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_STARTED) {
+		IPA_MHI_ERR("IPA MHI was not started\n");
+		spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+ channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id);
+ if (!channel) {
+ IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
+ return -EINVAL;
+ }
+
+ if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID &&
+ channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+ IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
+ return -EFAULT;
+ }
+
+ channel->channel_context_addr =
+ ipa_mhi_client_ctx->channel_context_array_addr +
+ channel->id * sizeof(struct ipa_mhi_ch_ctx);
+
+	/* the event context array index must be read from the host */
+
+ IPA_MHI_DBG("client %d channelIndex %d channelID %d, state %d\n",
+ channel->client, channel->index, channel->id, channel->state);
+ IPA_MHI_DBG("channel_context_addr 0x%llx cached_gsi_evt_ring_hdl %lu\n",
+ channel->channel_context_addr,
+ channel->cached_gsi_evt_ring_hdl);
+
+ IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ struct ipa_mhi_connect_params_internal internal;
+
+ IPA_MHI_DBG("reading ch/ev context from host\n");
+ res = ipa_mhi_read_ch_ctx(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
+ goto fail_start_channel;
+ }
+
+ internal.channel_id = in->channel_id;
+ internal.sys = &in->sys;
+ internal.start.gsi.state = channel->state;
+ internal.start.gsi.msi = &ipa_mhi_client_ctx->msi;
+ internal.start.gsi.ev_ctx_host = &channel->ev_ctx_host;
+ internal.start.gsi.event_context_addr =
+ channel->event_context_addr;
+ internal.start.gsi.ch_ctx_host = &channel->ch_ctx_host;
+ internal.start.gsi.channel_context_addr =
+ channel->channel_context_addr;
+ internal.start.gsi.ch_err_cb = ipa_mhi_gsi_ch_err_cb;
+ internal.start.gsi.channel = (void *)channel;
+ internal.start.gsi.ev_err_cb = ipa_mhi_gsi_ev_err_cb;
+ internal.start.gsi.assert_bit40 =
+ ipa_mhi_client_ctx->assert_bit40;
+ internal.start.gsi.mhi = &channel->ch_scratch.mhi;
+ internal.start.gsi.cached_gsi_evt_ring_hdl =
+ &channel->cached_gsi_evt_ring_hdl;
+ internal.start.gsi.evchid =
+ channel->index + IPA_MHI_GSI_ER_START;
+
+ res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
+ if (res) {
+ IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
+ goto fail_connect_pipe;
+ }
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ channel->brstmode_enabled =
+ channel->ch_scratch.mhi.burst_mode_enabled;
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->state, channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, chstate),
+ sizeof(channel->state));
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+			return res;
+		}
+ } else {
+ struct ipa_mhi_connect_params_internal internal;
+
+ internal.channel_id = in->channel_id;
+ internal.sys = &in->sys;
+ internal.start.uC.index = channel->index;
+ internal.start.uC.id = channel->id;
+ internal.start.uC.state = channel->state;
+ res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
+ if (res) {
+ IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
+ goto fail_connect_pipe;
+ }
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ }
+
+ if (!in->sys.keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+fail_connect_pipe:
+ ipa_mhi_reset_channel(channel);
+fail_start_channel:
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+ return -EPERM;
+}
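As a usage sketch, the MHI client driver connects each accelerated channel
after ipa_mhi_start() succeeds. The client type below is an assumption for
illustration; the real mapping of channel IDs to IPA clients is platform
specific:

	static int example_connect_ul_channel(u8 channel_id, u32 *clnt_hdl)
	{
		struct ipa_mhi_connect_params in;

		memset(&in, 0, sizeof(in));
		in.channel_id = channel_id;
		in.sys.client = IPA_CLIENT_MHI_PROD;	/* assumed client */

		return ipa_mhi_connect_pipe(&in, clnt_hdl);
	}

The returned handle is later passed to ipa_mhi_disconnect_pipe() on channel
reset.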
+
+/**
+ * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: client handle for this pipe
+ *
+ * This function is called by MHI client driver on MHI channel reset.
+ * This function is called after MHI channel was started.
+ * This function is doing the following:
+ * - Send command to uC/GSI to reset corresponding MHI channel
+ * - Configure IPA EP control
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+{
+ int res;
+ enum ipa_client_type client;
+	struct ipa_mhi_channel_ctx *channel;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("IPA MHI was not initialized\n");
+ return -EINVAL;
+ }
+
+ client = ipa_get_client_mapping(clnt_hdl);
+
+ if (!IPA_CLIENT_IS_MHI(client)) {
+ IPA_MHI_ERR("invalid IPA MHI client, client: %d\n", client);
+ return -EINVAL;
+ }
+
+ channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
+ if (!channel) {
+ IPA_MHI_ERR("invalid clnt index\n");
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl));
+
+ res = ipa_mhi_reset_channel(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
+ goto fail_reset_channel;
+ }
+
+ res = ipa_disconnect_mhi_pipe(clnt_hdl);
+ if (res) {
+ IPA_MHI_ERR(
+			"IPA core driver failed to disconnect the pipe hdl %d, res %d\n",
+			clnt_hdl, res);
+ return res;
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
+
+ IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+fail_reset_channel:
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
+ return res;
+}
+
+static int ipa_mhi_wait_for_cons_release(void)
+{
+ unsigned long flags;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ reinit_completion(&ipa_mhi_client_ctx->rm_cons_comp);
+ spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+ if (ipa_mhi_client_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) {
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+ res = wait_for_completion_timeout(
+ &ipa_mhi_client_ctx->rm_cons_comp,
+ msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
+ if (res == 0) {
+ IPA_MHI_ERR("timeout release mhi cons\n");
+ return -ETIME;
+ }
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels)
+{
+ int i;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+	/* the same bound serves both UL and DL arrays; maxima assumed equal */
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!channels[i].valid)
+ continue;
+ if (channels[i].state !=
+ IPA_HW_MHI_CHANNEL_STATE_RUN)
+ continue;
+ IPA_MHI_DBG("suspending channel %d\n",
+ channels[i].id);
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+ res = ipa_mhi_suspend_gsi_channel(
+ &channels[i]);
+ else
+ res = ipa_uc_mhi_suspend_channel(
+ channels[i].index);
+
+ if (res) {
+ IPA_MHI_ERR("failed to suspend channel %d error %d\n",
+ i, res);
+ return res;
+ }
+ channels[i].state =
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static int ipa_mhi_stop_event_update_channels(
+ struct ipa_mhi_channel_ctx *channels)
+{
+ int i;
+ int res;
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+ return 0;
+
+ IPA_MHI_FUNC_ENTRY();
+	/* the same bound serves both UL and DL arrays; maxima assumed equal */
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!channels[i].valid)
+ continue;
+ if (channels[i].state !=
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
+ continue;
+ IPA_MHI_DBG("stop update event channel %d\n",
+ channels[i].id);
+ res = ipa_uc_mhi_stop_event_update_channel(
+ channels[i].index);
+ if (res) {
+ IPA_MHI_ERR("failed stop event channel %d error %d\n",
+ i, res);
+ return res;
+ }
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static bool ipa_mhi_check_pending_packets_from_host(void)
+{
+ int i;
+ int res;
+ struct ipa_mhi_channel_ctx *channel;
+
+ IPA_MHI_FUNC_ENTRY();
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->ul_channels[i];
+ if (!channel->valid)
+ continue;
+
+ res = ipa_mhi_query_ch_info(channel->client,
+ &channel->ch_info);
+ if (res) {
+ IPA_MHI_ERR("gsi_query_channel_info failed\n");
+ return true;
+ }
+ res = ipa_mhi_read_ch_ctx(channel);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
+ return true;
+ }
+
+ if (channel->ch_info.rp != channel->ch_ctx_host.wp) {
+ IPA_MHI_DBG("There are pending packets from host\n");
+ IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n",
+ channel->ch_info.rp, channel->ch_ctx_host.wp);
+
+ return true;
+ }
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return false;
+}
+
+static int ipa_mhi_resume_channels(bool LPTransitionRejected,
+ struct ipa_mhi_channel_ctx *channels)
+{
+ int i;
+ int res;
+ struct ipa_mhi_channel_ctx *channel;
+
+ IPA_MHI_FUNC_ENTRY();
+	/* the same bound serves both UL and DL arrays; maxima assumed equal */
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ if (!channels[i].valid)
+ continue;
+ if (channels[i].state !=
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
+ continue;
+ channel = &channels[i];
+ IPA_MHI_DBG("resuming channel %d\n", channel->id);
+
+ res = ipa_mhi_resume_channels_internal(channel->client,
+ LPTransitionRejected, channel->brstmode_enabled,
+ channel->ch_scratch, channel->index);
+
+ if (res) {
+ IPA_MHI_ERR("failed to resume channel %d error %d\n",
+ i, res);
+ return res;
+ }
+
+ channel->stop_in_proc = false;
+ channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+/**
+ * ipa_mhi_suspend_ul() - Suspend MHI accelerated uplink channels
+ * @force:
+ *	false: in case of data pending in IPA, MHI channels will not be
+ *		suspended and function will fail.
+ *	true: in case of data pending in IPA, make sure no further access from
+ *		IPA to PCIe is possible. In this case suspend cannot fail.
+ * @empty: [out] true if the uplink channels drained within the timeout
+ * @force_clear: [out] true if the force-clear datapath was enabled
+ *
+ * This function is called by MHI client driver on MHI suspend.
+ * This function is called after MHI channel was started.
+ * When this function returns device can move to M1/M2/M3/D3cold state.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+static int ipa_mhi_suspend_ul(bool force, bool *empty, bool *force_clear)
+{
+ int res;
+
+ *force_clear = false;
+
+ res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
+ goto fail_suspend_ul_channel;
+ }
+
+ *empty = ipa_mhi_wait_for_ul_empty_timeout(
+ IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+
+ if (!*empty) {
+ if (force) {
+ res = ipa_mhi_enable_force_clear(
+ ipa_mhi_client_ctx->qmi_req_id, false);
+ if (res) {
+ IPA_MHI_ERR("failed to enable force clear\n");
+ ipa_assert();
+ return res;
+ }
+ *force_clear = true;
+ IPA_MHI_DBG("force clear datapath enabled\n");
+
+ *empty = ipa_mhi_wait_for_ul_empty_timeout(
+ IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+ IPA_MHI_DBG("empty=%d\n", *empty);
+ if (!*empty && ipa_get_transport_type()
+ == IPA_TRANSPORT_TYPE_GSI) {
+ IPA_MHI_ERR("Failed to suspend UL channels\n");
+ if (ipa_mhi_client_ctx->test_mode) {
+ res = -EAGAIN;
+ goto fail_suspend_ul_channel;
+ }
+
+ ipa_assert();
+ }
+ } else {
+ IPA_MHI_DBG("IPA not empty\n");
+ res = -EAGAIN;
+ goto fail_suspend_ul_channel;
+ }
+ }
+
+ if (*force_clear) {
+		res = ipa_mhi_disable_force_clear(
+			ipa_mhi_client_ctx->qmi_req_id);
+ if (res) {
+ IPA_MHI_ERR("failed to disable force clear\n");
+ ipa_assert();
+ return res;
+ }
+ IPA_MHI_DBG("force clear datapath disabled\n");
+ ipa_mhi_client_ctx->qmi_req_id++;
+ }
+
+ if (!force && ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ if (ipa_mhi_check_pending_packets_from_host()) {
+ res = -EAGAIN;
+ goto fail_suspend_ul_channel;
+ }
+ }
+
+ res = ipa_mhi_stop_event_update_channels(
+ ipa_mhi_client_ctx->ul_channels);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_mhi_stop_event_update_ul_channels failed %d\n",
+ res);
+ goto fail_suspend_ul_channel;
+ }
+
+ return 0;
+
+fail_suspend_ul_channel:
+ return res;
+}
+
+static bool ipa_mhi_has_open_aggr_frame(void)
+{
+ struct ipa_mhi_channel_ctx *channel;
+ int i;
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->dl_channels[i];
+
+ if (!channel->valid)
+ continue;
+
+ if (ipa_has_open_aggr_frame(channel->client))
+ return true;
+ }
+
+ return false;
+}
+
+static void ipa_mhi_update_host_ch_state(bool update_rp)
+{
+ int i;
+ int res;
+ struct ipa_mhi_channel_ctx *channel;
+
+ for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->ul_channels[i];
+ if (!channel->valid)
+ continue;
+
+ if (update_rp) {
+ res = ipa_mhi_query_ch_info(channel->client,
+ &channel->ch_info);
+ if (res) {
+ IPA_MHI_ERR("gsi_query_channel_info failed\n");
+ ipa_assert();
+ return;
+ }
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->ch_info.rp,
+ channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, rp),
+ sizeof(channel->ch_info.rp));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+ ipa_assert();
+ return;
+ }
+ }
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->state, channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, chstate),
+ sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+ ipa_assert();
+ return;
+ }
+ }
+
+ for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+ channel = &ipa_mhi_client_ctx->dl_channels[i];
+ if (!channel->valid)
+ continue;
+
+ if (update_rp) {
+ res = ipa_mhi_query_ch_info(channel->client,
+ &channel->ch_info);
+ if (res) {
+ IPA_MHI_ERR("gsi_query_channel_info failed\n");
+ ipa_assert();
+ return;
+ }
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->ch_info.rp,
+ channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, rp),
+ sizeof(channel->ch_info.rp));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+ ipa_assert();
+ return;
+ }
+ }
+
+ res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+ &channel->state, channel->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, chstate),
+ sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+ ipa_assert();
+ }
+ }
+}
+
+static int ipa_mhi_suspend_dl(bool force)
+{
+ int res;
+
+ res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+ if (res) {
+ IPA_MHI_ERR(
+ "ipa_mhi_suspend_channels for dl failed %d\n", res);
+ goto fail_suspend_dl_channel;
+ }
+
+	res = ipa_mhi_stop_event_update_channels(
+		ipa_mhi_client_ctx->dl_channels);
+ if (res) {
+ IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
+ goto fail_stop_event_update_dl_channel;
+ }
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ if (ipa_mhi_has_open_aggr_frame()) {
+ IPA_MHI_DBG("There is an open aggr frame\n");
+ if (force) {
+ ipa_mhi_client_ctx->trigger_wakeup = true;
+ } else {
+ res = -EAGAIN;
+ goto fail_stop_event_update_dl_channel;
+ }
+ }
+ }
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+		ipa_mhi_update_host_ch_state(true);
+
+	return 0;
+
+fail_stop_event_update_dl_channel:
+ ipa_mhi_resume_channels(true,
+ ipa_mhi_client_ctx->dl_channels);
+fail_suspend_dl_channel:
+ return res;
+}
+
+/**
+ * ipa_mhi_suspend() - Suspend MHI accelerated channels
+ * @force:
+ * false: in case of data pending in IPA, MHI channels will not be
+ * suspended and function will fail.
+ * true: in case of data pending in IPA, make sure no further access from
+ * IPA to PCIe is possible. In this case suspend cannot fail.
+ *
+ * This function is called by MHI client driver on MHI suspend.
+ * This function is called after MHI channel was started.
+ * When this function returns device can move to M1/M2/M3/D3cold state.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_suspend(bool force)
+{
+ int res;
+ bool empty;
+ bool force_clear;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+ return res;
+ }
+ res = ipa_mhi_suspend_ul(force, &empty, &force_clear);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_ul failed %d\n", res);
+ goto fail_suspend_ul_channel;
+ }
+
+	/*
+	 * Hold IPA clocks and release them only after all IPA RM resources
+	 * are released, to make sure the tag process will not start.
+	 */
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ IPA_MHI_DBG("release prod\n");
+ res = ipa_mhi_release_prod();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
+ goto fail_release_prod;
+ }
+
+ IPA_MHI_DBG("wait for cons release\n");
+ res = ipa_mhi_wait_for_cons_release();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n", res);
+ goto fail_release_cons;
+ }
+
+ usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
+
+ res = ipa_mhi_suspend_dl(force);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_suspend_dl failed %d\n", res);
+ goto fail_suspend_dl_channel;
+ }
+
+ if (!empty)
+ ipa_set_tag_process_before_gating(false);
+
+ res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+ goto fail_release_cons;
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_suspend_dl_channel:
+fail_release_cons:
+ ipa_mhi_request_prod();
+fail_release_prod:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+fail_suspend_ul_channel:
+ ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels);
+ ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+ if (force_clear) {
+		if (ipa_mhi_disable_force_clear(
+			ipa_mhi_client_ctx->qmi_req_id)) {
+ IPA_MHI_ERR("failed to disable force clear\n");
+ ipa_assert();
+ }
+ IPA_MHI_DBG("force clear datapath disabled\n");
+ ipa_mhi_client_ctx->qmi_req_id++;
+ }
+ return res;
+}
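As a usage sketch, a client that prefers a graceful suspend can first try a
non-forced suspend and fall back to a forced one only when the power
transition is mandatory. The helper and its policy flag are illustrative:

	static int example_enter_low_power(bool must_suspend)
	{
		int res;

		res = ipa_mhi_suspend(false);
		if (res == -EAGAIN && must_suspend) {
			/* data still pending; force IPA off the PCIe bus */
			res = ipa_mhi_suspend(true);
		}
		return res;
	}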
+
+/**
+ * ipa_mhi_resume() - Resume MHI accelerated channels
+ *
+ * This function is called by MHI client driver on MHI resume.
+ * This function is called after MHI channel was suspended.
+ * When this function returns device can move to M0 state.
+ * This function is doing the following:
+ * - Send command to uC/GSI to resume corresponding MHI channel
+ * - Request MHI_PROD in IPA RM
+ * - Resume data to IPA
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_resume(void)
+{
+ int res;
+ bool dl_channel_resumed = false;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+ return res;
+ }
+
+ if (ipa_mhi_client_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
+ /* resume all DL channels */
+ res = ipa_mhi_resume_channels(false,
+ ipa_mhi_client_ctx->dl_channels);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+ res);
+ goto fail_resume_dl_channels;
+ }
+ dl_channel_resumed = true;
+
+ ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_MHI_CONS);
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
+ }
+
+ res = ipa_mhi_request_prod();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
+ goto fail_request_prod;
+ }
+
+ /* resume all UL channels */
+ res = ipa_mhi_resume_channels(false,
+ ipa_mhi_client_ctx->ul_channels);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
+ goto fail_resume_ul_channels;
+ }
+
+ if (!dl_channel_resumed) {
+ res = ipa_mhi_resume_channels(false,
+ ipa_mhi_client_ctx->dl_channels);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+ res);
+ goto fail_resume_dl_channels2;
+ }
+ }
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+ ipa_mhi_update_host_ch_state(false);
+
+ res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+ goto fail_set_state;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_set_state:
+ ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+fail_resume_dl_channels2:
+ ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
+fail_resume_ul_channels:
+ ipa_mhi_release_prod();
+fail_request_prod:
+ ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+fail_resume_dl_channels:
+ ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
+ return res;
+}
+
+static int ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
+ int num_of_channels)
+{
+ struct ipa_mhi_channel_ctx *channel;
+ int i, res;
+ u32 clnt_hdl;
+
+ for (i = 0; i < num_of_channels; i++) {
+ channel = &channels[i];
+ if (!channel->valid)
+ continue;
+ if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID)
+ continue;
+ if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+ clnt_hdl = ipa_get_ep_mapping(channel->client);
+ IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
+ res = ipa_mhi_disconnect_pipe(clnt_hdl);
+ if (res) {
+ IPA_MHI_ERR(
+ "failed to disconnect pipe %d, err %d\n"
+ , clnt_hdl, res);
+ goto fail;
+ }
+ }
+ res = ipa_mhi_destroy_channel(channel->client);
+ if (res) {
+ IPA_MHI_ERR(
+				"ipa_mhi_destroy_channel failed %d\n",
+				res);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ return res;
+}
+
+/**
+ * ipa_mhi_destroy_all_channels() - Destroy MHI IPA channels
+ *
+ * This function is called by IPA MHI client driver on MHI reset to destroy all
+ * IPA MHI channels.
+ */
+int ipa_mhi_destroy_all_channels(void)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+	/* reset all UL and DL acc channels and their associated event rings */
+ res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->ul_channels,
+ IPA_MHI_MAX_UL_CHANNELS);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_destroy_channels(ul_channels) failed %d\n",
+ res);
+ return -EPERM;
+ }
+ IPA_MHI_DBG("All UL channels are disconnected\n");
+
+ res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_destroy_channels(dl_channels) failed %d\n",
+ res);
+ return -EPERM;
+ }
+ IPA_MHI_DBG("All DL channels are disconnected\n");
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+static void ipa_mhi_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(dent);
+}
+
+/**
+ * ipa_mhi_destroy() - Destroy MHI IPA
+ *
+ * This function is called by MHI client driver on MHI reset to destroy all IPA
+ * MHI resources.
+ * When this function returns ipa_mhi can re-initialize.
+ */
+void ipa_mhi_destroy(void)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n");
+ return;
+ }
+	/* reset all UL and DL acc channels and their associated event rings */
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ res = ipa_mhi_destroy_all_channels();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_destroy_all_channels failed %d\n",
+ res);
+ goto fail;
+ }
+ }
+ IPA_MHI_DBG("All channels are disconnected\n");
+
+ if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS) {
+ IPA_MHI_DBG("cleanup uC MHI\n");
+ ipa_uc_mhi_cleanup();
+ }
+
+ if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED &&
+ ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
+ IPA_MHI_DBG("release prod\n");
+ res = ipa_mhi_release_prod();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
+ goto fail;
+ }
+ IPA_MHI_DBG("wait for cons release\n");
+ res = ipa_mhi_wait_for_cons_release();
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n",
+ res);
+ goto fail;
+ }
+ usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
+ IPA_MHI_SUSPEND_SLEEP_MAX);
+
+		IPA_MHI_DBG("delete dependency Q6_PROD->MHI_CONS\n");
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_MHI_CONS);
+ if (res) {
+ IPA_MHI_ERR(
+ "Error deleting dependency %d->%d, res=%d\n"
+ , IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_MHI_CONS,
+ res);
+ goto fail;
+ }
+		IPA_MHI_DBG("delete dependency MHI_PROD->Q6_CONS\n");
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res) {
+ IPA_MHI_ERR(
+ "Error deleting dependency %d->%d, res=%d\n",
+ IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_Q6_CONS,
+ res);
+ goto fail;
+ }
+ }
+
+ res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+ if (res) {
+ IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+ IPA_RM_RESOURCE_MHI_PROD, res);
+ goto fail;
+ }
+
+ res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+ if (res) {
+ IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+ IPA_RM_RESOURCE_MHI_CONS, res);
+ goto fail;
+ }
+
+ ipa_mhi_debugfs_destroy();
+ destroy_workqueue(ipa_mhi_client_ctx->wq);
+ kfree(ipa_mhi_client_ctx);
+ ipa_mhi_client_ctx = NULL;
+ IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n");
+
+ IPA_MHI_FUNC_EXIT();
+ return;
+fail:
+ ipa_assert();
+}
+
+/**
+ * ipa_mhi_init() - Initialize IPA MHI driver
+ * @params: initialization params
+ *
+ * This function is called by MHI client driver on boot to initialize IPA MHI
+ * Driver. When this function returns device can move to READY state.
+ * This function is doing the following:
+ * - Initialize MHI IPA internal data structures
+ * - Create IPA RM resources
+ * - Initialize debugfs
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+ int res;
+ struct ipa_rm_create_params mhi_prod_params;
+ struct ipa_rm_create_params mhi_cons_params;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!params) {
+ IPA_MHI_ERR("null args\n");
+ return -EINVAL;
+ }
+
+ if (!params->notify) {
+ IPA_MHI_ERR("null notify function\n");
+ return -EINVAL;
+ }
+
+ if (ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("already initialized\n");
+ return -EPERM;
+ }
+
+ IPA_MHI_DBG("notify = %pF priv = %p\n", params->notify, params->priv);
+ IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
+ params->msi.addr_low, params->msi.addr_hi);
+ IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
+ params->msi.data, params->msi.mask);
+ IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
+ IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
+ IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
+ IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40);
+ IPA_MHI_DBG("test_mode=%d\n", params->test_mode);
+
+ /* Initialize context */
+ ipa_mhi_client_ctx = kzalloc(sizeof(*ipa_mhi_client_ctx), GFP_KERNEL);
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("no memory\n");
+		res = -ENOMEM;
+ goto fail_alloc_ctx;
+ }
+
+ ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
+ ipa_mhi_client_ctx->cb_notify = params->notify;
+ ipa_mhi_client_ctx->cb_priv = params->priv;
+ ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
+ init_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+ spin_lock_init(&ipa_mhi_client_ctx->state_lock);
+ init_completion(&ipa_mhi_client_ctx->rm_cons_comp);
+ ipa_mhi_client_ctx->msi = params->msi;
+ ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
+ ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
+ ipa_mhi_client_ctx->first_er_idx = params->first_er_idx;
+ ipa_mhi_client_ctx->qmi_req_id = 0;
+ ipa_mhi_client_ctx->use_ipadma = true;
+ ipa_mhi_client_ctx->assert_bit40 = !!params->assert_bit40;
+ ipa_mhi_client_ctx->test_mode = params->test_mode;
+
+ ipa_mhi_client_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
+ if (!ipa_mhi_client_ctx->wq) {
+ IPA_MHI_ERR("failed to create workqueue\n");
+ res = -EFAULT;
+ goto fail_create_wq;
+ }
+
+ /* Create PROD in IPA RM */
+ memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
+ mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
+ mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
+ mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
+ res = ipa_rm_create_resource(&mhi_prod_params);
+ if (res) {
+ IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
+ goto fail_create_rm_prod;
+ }
+
+ /* Create CONS in IPA RM */
+ memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
+ mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
+ mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
+ mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
+ mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
+ res = ipa_rm_create_resource(&mhi_cons_params);
+ if (res) {
+ IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
+ goto fail_create_rm_cons;
+ }
+
+ /* Initialize uC interface */
+ ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
+ ipa_mhi_uc_wakeup_request_cb);
+ if (ipa_uc_state_check() == 0)
+ ipa_mhi_set_state(IPA_MHI_STATE_READY);
+
+ /* Initialize debugfs */
+ ipa_mhi_debugfs_init();
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_create_rm_cons:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+fail_create_rm_prod:
+ destroy_workqueue(ipa_mhi_client_ctx->wq);
+fail_create_wq:
+ kfree(ipa_mhi_client_ctx);
+ ipa_mhi_client_ctx = NULL;
+fail_alloc_ctx:
+ return res;
+}
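As a usage sketch, the MHI client driver fills the init parameters from its
MSI configuration and registers its notification callback. The enum type
name for the callback's event argument and all values below are assumptions
for illustration:

	static void example_mhi_notify_cb(void *priv,
		enum ipa_mhi_event_type event, unsigned long data)
	{
		/* handle IPA_MHI_EVENT_READY / IPA_MHI_EVENT_DATA_AVAILABLE */
	}

	static int example_init_ipa_mhi(void)
	{
		struct ipa_mhi_init_params params;

		memset(&params, 0, sizeof(params));
		params.notify = example_mhi_notify_cb;
		params.priv = NULL;
		params.msi.addr_low = 0xfee00000;	/* illustrative MSI */
		params.msi.data = 0x1;
		params.msi.mask = ~0;
		params.mmio_addr = 0x1000;		/* illustrative */
		params.first_ch_idx = 4;
		params.first_er_idx = 4;

		return ipa_mhi_init(&params);
	}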
+
+static void ipa_mhi_cache_dl_ul_sync_info(
+ struct ipa_config_req_msg_v01 *config_req)
+{
+ ipa_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
+ ipa_cached_dl_ul_sync_info.params.UlAccmVal =
+ (config_req->ul_accumulation_time_limit_valid) ?
+ config_req->ul_accumulation_time_limit : 0;
+ ipa_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
+ (config_req->ul_msi_event_threshold_valid) ?
+ config_req->ul_msi_event_threshold : 0;
+ ipa_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
+ (config_req->dl_msi_event_threshold_valid) ?
+ config_req->dl_msi_event_threshold : 0;
+}
+
+/**
+ * ipa_mhi_handle_ipa_config_req() - handle IPA CONFIG QMI message
+ *
+ * This function is called by the IPA QMI service to indicate that an IPA
+ * CONFIG message was sent from the modem. IPA MHI will forward this
+ * information to the IPA uC, or cache it until IPA MHI is initialized.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
+{
+ IPA_MHI_FUNC_ENTRY();
+
+ if (ipa_get_transport_type() != IPA_TRANSPORT_TYPE_GSI) {
+ ipa_mhi_cache_dl_ul_sync_info(config_req);
+ if (ipa_mhi_client_ctx &&
+ ipa_mhi_client_ctx->state !=
+ IPA_MHI_STATE_INITIALIZED)
+ ipa_uc_mhi_send_dl_ul_sync_info(
+ &ipa_cached_dl_ul_sync_info);
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+int ipa_mhi_is_using_dma(bool *flag)
+{
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!ipa_mhi_client_ctx) {
+ IPA_MHI_ERR("not initialized\n");
+ return -EPERM;
+ }
+
+	*flag = ipa_mhi_client_ctx->use_ipadma;
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+EXPORT_SYMBOL(ipa_mhi_is_using_dma);
+
+const char *ipa_mhi_get_state_str(int state)
+{
+ return MHI_STATE_STR(state);
+}
+EXPORT_SYMBOL(ipa_mhi_get_state_str);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI client driver");
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
new file mode 100644
index 0000000..069f0a2
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -0,0 +1,597 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_uc_offload.h>
+#include <linux/msm_ipa.h>
+#include "../ipa_common_i.h"
+
+#define IPA_NTN_DMA_POOL_ALIGNMENT 8
+#define OFFLOAD_DRV_NAME "ipa_uc_offload"
+#define IPA_UC_OFFLOAD_DBG(fmt, args...) \
+ do { \
+ pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_LOW(fmt, args...) \
+ do { \
+ pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_ERR(fmt, args...) \
+ do { \
+ pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_INFO(fmt, args...) \
+ do { \
+ pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+enum ipa_uc_offload_state {
+ IPA_UC_OFFLOAD_STATE_INVALID,
+ IPA_UC_OFFLOAD_STATE_INITIALIZED,
+ IPA_UC_OFFLOAD_STATE_UP,
+ IPA_UC_OFFLOAD_STATE_DOWN,
+};
+
+struct ipa_uc_offload_ctx {
+ enum ipa_uc_offload_proto proto;
+ enum ipa_uc_offload_state state;
+ void *priv;
+ u8 hdr_len;
+ u32 partial_hdr_hdl[IPA_IP_MAX];
+ char netdev_name[IPA_RESOURCE_NAME_MAX];
+ ipa_notify_cb notify;
+ struct completion ntn_completion;
+};
+
+static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
+
+static int ipa_commit_partial_hdr(
+ struct ipa_ioc_add_hdr *hdr,
+ const char *netdev_name,
+ struct ipa_hdr_info *hdr_info)
+{
+ int i;
+
+ if (hdr == NULL || hdr_info == NULL) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ hdr->commit = 1;
+ hdr->num_hdrs = 2;
+
+ snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+ "%s_ipv4", netdev_name);
+ snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+ "%s_ipv6", netdev_name);
+ for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+ hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+ memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+ hdr->hdr[i].type = hdr_info[i].hdr_type;
+ hdr->hdr[i].is_partial = 1;
+ hdr->hdr[i].is_eth2_ofst_valid = 1;
+ hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+ }
+
+ if (ipa_add_hdr(hdr)) {
+ IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int ipa_uc_offload_ntn_reg_intf(
+ struct ipa_uc_offload_intf_params *inp,
+ struct ipa_uc_offload_out_params *outp,
+ struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_tx_intf tx;
+ struct ipa_rx_intf rx;
+ struct ipa_ioc_tx_intf_prop tx_prop[2];
+ struct ipa_ioc_rx_intf_prop rx_prop[2];
+ u32 len;
+ int ret = 0;
+
+ IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
+ inp->netdev_name);
+
+ memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
+ ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
+ ntn_ctx->notify = inp->notify;
+ ntn_ctx->priv = inp->priv;
+
+ /* add partial header */
+ len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+ hdr = kzalloc(len, GFP_KERNEL);
+ if (hdr == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+ return -ENOMEM;
+ }
+
+ if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
+ IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ /* populate tx prop */
+ tx.num_props = 2;
+ tx.prop = tx_prop;
+
+ memset(tx_prop, 0, sizeof(tx_prop));
+ tx_prop[0].ip = IPA_IP_v4;
+ tx_prop[0].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+ memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
+ sizeof(tx_prop[0].hdr_name));
+
+ tx_prop[1].ip = IPA_IP_v6;
+ tx_prop[1].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+ memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+ sizeof(tx_prop[1].hdr_name));
+
+ /* populate rx prop */
+ rx.num_props = 2;
+ rx.prop = rx_prop;
+
+ memset(rx_prop, 0, sizeof(rx_prop));
+ rx_prop[0].ip = IPA_IP_v4;
+ rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+ if (inp->is_meta_data_valid) {
+ rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_prop[0].attrib.meta_data = inp->meta_data;
+ rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask;
+ }
+
+ rx_prop[1].ip = IPA_IP_v6;
+ rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+ if (inp->is_meta_data_valid) {
+ rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_prop[1].attrib.meta_data = inp->meta_data;
+ rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
+ }
+
+ if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
+ IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
+ memset(ntn_ctx, 0, sizeof(*ntn_ctx));
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+ ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+ init_completion(&ntn_ctx->ntn_completion);
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
+
+fail:
+ kfree(hdr);
+ return ret;
+}
+
+int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *inp,
+ struct ipa_uc_offload_out_params *outp)
+{
+ struct ipa_uc_offload_ctx *ctx;
+ int ret = 0;
+
+ if (inp == NULL || outp == NULL) {
+ IPA_UC_OFFLOAD_ERR("invalid params in=%p out=%p\n", inp, outp);
+ return -EINVAL;
+ }
+
+ if (inp->proto <= IPA_UC_INVALID ||
+ inp->proto >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto);
+ return -EINVAL;
+ }
+
+ if (!ipa_uc_offload_ctx[inp->proto]) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n");
+ return -ENOMEM;
+ }
+ ipa_uc_offload_ctx[inp->proto] = ctx;
+ ctx->proto = inp->proto;
+ } else
+ ctx = ipa_uc_offload_ctx[inp->proto];
+
+ if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) {
+ IPA_UC_OFFLOAD_ERR("Already Initialized\n");
+ return -EINVAL;
+ }
+
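+ /*
+ * NTN is currently the only supported protocol; its enum value
+ * doubles as the client handle returned to the caller.
+ */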
+ if (ctx->proto == IPA_UC_NTN) {
+ ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
+ if (!ret)
+ outp->clnt_hndl = IPA_UC_NTN;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
+
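+/* RM request/release callbacks for the ODU_ADAPT_CONS resource */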
+static int ipa_uc_ntn_cons_release(void)
+{
+ return 0;
+}
+
+static int ipa_uc_ntn_cons_request(void)
+{
+ int ret = 0;
+ struct ipa_uc_offload_ctx *ntn_ctx;
+
+ ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN];
+ if (!ntn_ctx) {
+ IPA_UC_OFFLOAD_ERR("NTN is not initialized\n");
+ ret = -EFAULT;
+ } else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+ IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state);
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+
+ offload_ctx = (struct ipa_uc_offload_ctx *)user_data;
+ if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID &&
+ offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) {
+ IPA_UC_OFFLOAD_ERR("Invalid user data\n");
+ return;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED)
+ IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state);
+
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ complete_all(&offload_ctx->ntn_completion);
+ break;
+
+ case IPA_RM_RESOURCE_RELEASED:
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event);
+ break;
+ }
+}
+
+int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
+ struct ipa_ntn_conn_out_params *outp,
+ struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ struct ipa_rm_create_params param;
+ int result = 0;
+
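+ /* ring and buffer pool bases must satisfy IPA_NTN_DMA_POOL_ALIGNMENT */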
+ if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+ inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+ IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
+ return -EINVAL;
+ }
+ if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+ inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+ IPA_UC_OFFLOAD_ERR("alignment failure on RX\n");
+ return -EINVAL;
+ }
+
+ memset(&param, 0, sizeof(param));
+ param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ param.reg_params.user_data = ntn_ctx;
+ param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
+ param.floor_voltage = IPA_VOLTAGE_SVS;
+ result = ipa_rm_create_resource(&param);
+ if (result) {
+ IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
+ return -EFAULT;
+ }
+
+ memset(&param, 0, sizeof(param));
+ param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ param.request_resource = ipa_uc_ntn_cons_request;
+ param.release_resource = ipa_uc_ntn_cons_release;
+ result = ipa_rm_create_resource(&param);
+ if (result) {
+ IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
+ goto fail_create_rm_cons;
+ }
+
+ if (ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to add rm dependency\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
+ ntn_ctx->priv, ntn_ctx->hdr_len, outp)) {
+ IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
+ result = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ if (result == -EINPROGRESS) {
+ if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
+ 10*HZ) == 0) {
+ IPA_UC_OFFLOAD_ERR("ODU PROD resource req time out\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ } else if (result != 0) {
+ IPA_UC_OFFLOAD_ERR("fail to request resource\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+fail_create_rm_cons:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+
+ return result;
+}
+
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
+ struct ipa_uc_offload_conn_out_params *outp)
+{
+ int ret = 0;
+ struct ipa_uc_offload_ctx *offload_ctx;
+
+ if (!(inp && outp)) {
+ IPA_UC_OFFLOAD_ERR("bad parm. in=%p out=%p\n", inp, outp);
+ return -EINVAL;
+ }
+
+ if (inp->clnt_hndl <= IPA_UC_INVALID ||
+ inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("invalid client handle %d\n",
+ inp->clnt_hndl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
+ IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
+ return -EPERM;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn,
+ offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
+
+int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+ struct ipa_rm_perf_profile rm_profile;
+ enum ipa_rm_resource_name resource_name;
+
+ if (profile == NULL) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ rm_profile.max_supported_bandwidth_mbps =
+ profile->max_supported_bw_mbps;
+
+ if (profile->client == IPA_CLIENT_ODU_PROD) {
+ resource_name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ } else if (profile->client == IPA_CLIENT_ODU_TETH_CONS) {
+ resource_name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ } else {
+ IPA_UC_OFFLOAD_ERR("not supported\n");
+ return -EINVAL;
+ }
+
+ if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
+ IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_perf_profile);
+
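+/* Tear down NTN: drop the RM dependency and resources, then the uC pipes */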
+static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ int ipa_ep_idx_ul, ipa_ep_idx_dl;
+
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
+ if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete rm dependency\n");
+ return -EFAULT;
+ }
+
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
+ return -EFAULT;
+ }
+
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+ return -EFAULT;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ODU_PROD);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ODU_TETH_CONS);
+ if (ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl)) {
+ IPA_UC_OFFLOAD_ERR("fail to tear down uc offload pipes\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+ int ret = 0;
+
+ if (clnt_hdl <= IPA_UC_INVALID ||
+ clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid client Handle\n");
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+ IPA_UC_OFFLOAD_ERR("Invalid state\n");
+ return -EINVAL;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
+
+static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ int len, result = 0;
+ struct ipa_ioc_del_hdr *hdr;
+
+ len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
+ hdr = kzalloc(len, GFP_KERNEL);
+ if (hdr == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+ return -ENOMEM;
+ }
+
+ hdr->commit = 1;
+ hdr->num_hdls = 2;
+ hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
+ hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
+
+ if (ipa_del_hdr(hdr)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+fail:
+ kfree(hdr);
+ return result;
+}
+
+int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+ int ret = 0;
+
+ if (clnt_hdl <= IPA_UC_INVALID ||
+ clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+ IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
+ return -EINVAL;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_cleanup(offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ kfree(offload_ctx);
+ offload_ctx = NULL;
+ ipa_uc_offload_ctx[clnt_hdl] = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_cleanup);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
new file mode 100644
index 0000000..8e58320
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -0,0 +1,2711 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include <linux/ipa_usb.h>
+#include <linux/rndis_ipa.h>
+#include <linux/ecm_ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_USB_RM_TIMEOUT_MSEC 10000
+#define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000
+
+#define IPA_HOLB_TMR_EN 0x1
+
+/* GSI channels weights */
+#define IPA_USB_DL_CHAN_LOW_WEIGHT 0x5
+#define IPA_USB_UL_CHAN_LOW_WEIGHT 0x4
+
+#define IPA_USB_MAX_MSG_LEN 4096
+
+#define IPA_USB_DRV_NAME "ipa_usb"
+
+#define IPA_USB_DBG(fmt, args...) \
+ do { \
+ pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_USB_DBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_USB_ERR(fmt, args...) \
+ do { \
+ pr_err(IPA_USB_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_USB_INFO(fmt, args...) \
+ do { \
+ pr_info(IPA_USB_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+struct ipa_usb_xdci_connect_params_internal {
+ enum ipa_usb_max_usb_packet_size max_pkt_size;
+ u32 ipa_to_usb_clnt_hdl;
+ u8 ipa_to_usb_xferrscidx;
+ bool ipa_to_usb_xferrscidx_valid;
+ u32 usb_to_ipa_clnt_hdl;
+ u8 usb_to_ipa_xferrscidx;
+ bool usb_to_ipa_xferrscidx_valid;
+ enum ipa_usb_teth_prot teth_prot;
+ struct ipa_usb_teth_prot_params teth_prot_params;
+ u32 max_supported_bandwidth_mbps;
+};
+
+enum ipa3_usb_teth_prot_state {
+ IPA_USB_TETH_PROT_INITIALIZED,
+ IPA_USB_TETH_PROT_CONNECTED,
+ IPA_USB_TETH_PROT_INVALID
+};
+
+struct ipa3_usb_teth_prot_context {
+ union {
+ struct ipa_usb_init_params rndis;
+ struct ecm_ipa_params ecm;
+ struct teth_bridge_init_params teth_bridge;
+ } teth_prot_params;
+ enum ipa3_usb_teth_prot_state state;
+ void *user_data;
+};
+
+enum ipa3_usb_cons_state {
+ IPA_USB_CONS_GRANTED,
+ IPA_USB_CONS_RELEASED
+};
+
+struct ipa3_usb_rm_context {
+ struct ipa_rm_create_params prod_params;
+ struct ipa_rm_create_params cons_params;
+ bool prod_valid;
+ bool cons_valid;
+ struct completion prod_comp;
+ enum ipa3_usb_cons_state cons_state;
+ /* consumer was requested */
+ bool cons_requested;
+ /* consumer was requested and released before it was granted */
+ bool cons_requested_released;
+};
+
+enum ipa3_usb_state {
+ IPA_USB_INVALID,
+ IPA_USB_INITIALIZED,
+ IPA_USB_CONNECTED,
+ IPA_USB_STOPPED,
+ IPA_USB_SUSPEND_REQUESTED,
+ IPA_USB_SUSPEND_IN_PROGRESS,
+ IPA_USB_SUSPENDED,
+ IPA_USB_RESUME_IN_PROGRESS
+};
+
+enum ipa3_usb_transport_type {
+ IPA_USB_TRANSPORT_TETH,
+ IPA_USB_TRANSPORT_DPL,
+ IPA_USB_TRANSPORT_MAX
+};
+
+/* Get transport type from tethering protocol */
+#define IPA3_USB_GET_TTYPE(__teth_prot) \
+ (((__teth_prot) == IPA_USB_DIAG) ? \
+ IPA_USB_TRANSPORT_DPL : IPA_USB_TRANSPORT_TETH)
+
+/* Is the given transport type DPL? */
+#define IPA3_USB_IS_TTYPE_DPL(__ttype) \
+ ((__ttype) == IPA_USB_TRANSPORT_DPL)
+
+struct finish_suspend_work_context {
+ struct work_struct work;
+ enum ipa3_usb_transport_type ttype;
+ u32 dl_clnt_hdl;
+ u32 ul_clnt_hdl;
+};
+
+/**
+ * Transport type - either data tethering or DPL.
+ * Each transport has its own RM resources and state.
+ */
+struct ipa3_usb_transport_type_ctx {
+ struct ipa3_usb_rm_context rm_ctx;
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
+ void *user_data;
+ enum ipa3_usb_state state;
+ struct finish_suspend_work_context finish_suspend_work;
+ struct ipa_usb_xdci_chan_params ch_params;
+};
+
+struct ipa3_usb_smmu_reg_map {
+ int cnt;
+ phys_addr_t addr;
+};
+
+struct ipa3_usb_context {
+ struct ipa3_usb_teth_prot_context
+ teth_prot_ctx[IPA_USB_MAX_TETH_PROT_SIZE];
+ int num_init_prot; /* without dpl */
+ struct teth_bridge_init_params teth_bridge_params;
+ struct completion dev_ready_comp;
+ u32 qmi_req_id;
+ spinlock_t state_lock;
+ bool dl_data_pending;
+ struct workqueue_struct *wq;
+ struct mutex general_mutex;
+ struct ipa3_usb_transport_type_ctx
+ ttype_ctx[IPA_USB_TRANSPORT_MAX];
+ struct dentry *dfile_state_info;
+ struct dentry *dent;
+ struct ipa3_usb_smmu_reg_map smmu_reg_map;
+};
+
+enum ipa3_usb_op {
+ IPA_USB_INIT_TETH_PROT,
+ IPA_USB_REQUEST_CHANNEL,
+ IPA_USB_CONNECT,
+ IPA_USB_DISCONNECT,
+ IPA_USB_RELEASE_CHANNEL,
+ IPA_USB_DEINIT_TETH_PROT,
+ IPA_USB_SUSPEND,
+ IPA_USB_RESUME
+};
+
+struct ipa3_usb_status_dbg_info {
+ const char *teth_state;
+ const char *dpl_state;
+ int num_init_prot;
+ const char *inited_prots[IPA_USB_MAX_TETH_PROT_SIZE];
+ const char *teth_connected_prot;
+ const char *dpl_connected_prot;
+ const char *teth_cons_state;
+ const char *dpl_cons_state;
+};
+
+static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
+static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work);
+static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work);
+static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work);
+static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work,
+ ipa3_usb_wq_notify_remote_wakeup);
+static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work,
+ ipa3_usb_wq_dpl_notify_remote_wakeup);
+static DECLARE_WORK(ipa3_usb_notify_suspend_completed_work,
+ ipa3_usb_wq_notify_suspend_completed);
+static DECLARE_WORK(ipa3_usb_dpl_notify_suspend_completed_work,
+ ipa3_usb_wq_dpl_notify_suspend_completed);
+
+struct ipa3_usb_context *ipa3_usb_ctx;
+
+static char *ipa3_usb_op_to_string(enum ipa3_usb_op op)
+{
+ switch (op) {
+ case IPA_USB_INIT_TETH_PROT:
+ return "IPA_USB_INIT_TETH_PROT";
+ case IPA_USB_REQUEST_CHANNEL:
+ return "IPA_USB_REQUEST_CHANNEL";
+ case IPA_USB_CONNECT:
+ return "IPA_USB_CONNECT";
+ case IPA_USB_DISCONNECT:
+ return "IPA_USB_DISCONNECT";
+ case IPA_USB_RELEASE_CHANNEL:
+ return "IPA_USB_RELEASE_CHANNEL";
+ case IPA_USB_DEINIT_TETH_PROT:
+ return "IPA_USB_DEINIT_TETH_PROT";
+ case IPA_USB_SUSPEND:
+ return "IPA_USB_SUSPEND";
+ case IPA_USB_RESUME:
+ return "IPA_USB_RESUME";
+ }
+
+ return "UNSUPPORTED";
+}
+
+static char *ipa3_usb_state_to_string(enum ipa3_usb_state state)
+{
+ switch (state) {
+ case IPA_USB_INVALID:
+ return "IPA_USB_INVALID";
+ case IPA_USB_INITIALIZED:
+ return "IPA_USB_INITIALIZED";
+ case IPA_USB_CONNECTED:
+ return "IPA_USB_CONNECTED";
+ case IPA_USB_STOPPED:
+ return "IPA_USB_STOPPED";
+ case IPA_USB_SUSPEND_REQUESTED:
+ return "IPA_USB_SUSPEND_REQUESTED";
+ case IPA_USB_SUSPEND_IN_PROGRESS:
+ return "IPA_USB_SUSPEND_IN_PROGRESS";
+ case IPA_USB_SUSPENDED:
+ return "IPA_USB_SUSPENDED";
+ case IPA_USB_RESUME_IN_PROGRESS:
+ return "IPA_USB_RESUME_IN_PROGRESS";
+ }
+
+ return "UNSUPPORTED";
+}
+
+static char *ipa3_usb_notify_event_to_string(enum ipa_usb_notify_event event)
+{
+ switch (event) {
+ case IPA_USB_DEVICE_READY:
+ return "IPA_USB_DEVICE_READY";
+ case IPA_USB_REMOTE_WAKEUP:
+ return "IPA_USB_REMOTE_WAKEUP";
+ case IPA_USB_SUSPEND_COMPLETED:
+ return "IPA_USB_SUSPEND_COMPLETED";
+ }
+
+ return "UNSUPPORTED";
+}
+
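+/*
+ * Validate and apply a state transition for the given transport type
+ * under the state lock. Returns true if the transition was legal.
+ */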
+static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
+ enum ipa3_usb_transport_type ttype)
+{
+ unsigned long flags;
+ bool state_legal = false;
+ enum ipa3_usb_state state;
+ struct ipa3_usb_rm_context *rm_ctx;
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+ switch (new_state) {
+ case IPA_USB_INVALID:
+ if (state == IPA_USB_INITIALIZED)
+ state_legal = true;
+ break;
+ case IPA_USB_INITIALIZED:
+ if (state == IPA_USB_STOPPED || state == IPA_USB_INVALID ||
+ ((!IPA3_USB_IS_TTYPE_DPL(ttype)) &&
+ (state == IPA_USB_INITIALIZED)))
+ state_legal = true;
+ break;
+ case IPA_USB_CONNECTED:
+ if (state == IPA_USB_INITIALIZED ||
+ state == IPA_USB_STOPPED ||
+ state == IPA_USB_RESUME_IN_PROGRESS ||
+ /*
+ * In case of failure during suspend request
+ * handling, state is reverted to connected.
+ */
+ (err_permit && state == IPA_USB_SUSPEND_REQUESTED) ||
+ /*
+ * In case of failure during suspend completing
+ * handling, state is reverted to connected.
+ */
+ (err_permit && state == IPA_USB_SUSPEND_IN_PROGRESS))
+ state_legal = true;
+ break;
+ case IPA_USB_STOPPED:
+ if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
+ state == IPA_USB_CONNECTED ||
+ state == IPA_USB_SUSPENDED)
+ state_legal = true;
+ break;
+ case IPA_USB_SUSPEND_REQUESTED:
+ if (state == IPA_USB_CONNECTED)
+ state_legal = true;
+ break;
+ case IPA_USB_SUSPEND_IN_PROGRESS:
+ if (state == IPA_USB_SUSPEND_REQUESTED ||
+ /*
+ * In case of failure during resume, state is reverted
+ * to original, which could be suspend_in_progress.
+ * Allow it.
+ */
+ (err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
+ state_legal = true;
+ break;
+ case IPA_USB_SUSPENDED:
+ if (state == IPA_USB_SUSPEND_REQUESTED ||
+ state == IPA_USB_SUSPEND_IN_PROGRESS ||
+ /*
+ * In case of failure during resume, state is reverted
+ * to original, which could be suspended. Allow it
+ */
+ (err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
+ state_legal = true;
+ break;
+ case IPA_USB_RESUME_IN_PROGRESS:
+ if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
+ state == IPA_USB_SUSPENDED)
+ state_legal = true;
+ break;
+ default:
+ state_legal = false;
+ break;
+ }
+ if (state_legal) {
+ if (state != new_state) {
+ IPA_USB_DBG("ipa_usb %s state changed %s -> %s\n",
+ IPA3_USB_IS_TTYPE_DPL(ttype) ? "DPL" : "",
+ ipa3_usb_state_to_string(state),
+ ipa3_usb_state_to_string(new_state));
+ ipa3_usb_ctx->ttype_ctx[ttype].state = new_state;
+ }
+ } else {
+ IPA_USB_ERR("invalid state change %s -> %s\n",
+ ipa3_usb_state_to_string(state),
+ ipa3_usb_state_to_string(new_state));
+ }
+
+ if (state_legal && (new_state == IPA_USB_CONNECTED)) {
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+ if ((rm_ctx->cons_state == IPA_USB_CONS_GRANTED) ||
+ rm_ctx->cons_requested_released) {
+ rm_ctx->cons_requested = false;
+ rm_ctx->cons_requested_released = false;
+ }
+ /* Notify RM that consumer is granted */
+ if (rm_ctx->cons_requested) {
+ ipa_rm_notify_completion(
+ IPA_RM_RESOURCE_GRANTED,
+ rm_ctx->cons_params.name);
+ rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
+ rm_ctx->cons_requested = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ return state_legal;
+}
+
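+/* Check whether the requested operation is legal in the current state */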
+static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
+ enum ipa3_usb_transport_type ttype)
+{
+ unsigned long flags;
+ bool is_legal = false;
+ enum ipa3_usb_state state;
+ bool is_dpl;
+
+ if (ipa3_usb_ctx == NULL) {
+ IPA_USB_ERR("ipa_usb_ctx is not initialized!\n");
+ return false;
+ }
+
+ is_dpl = IPA3_USB_IS_TTYPE_DPL(ttype);
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+ switch (op) {
+ case IPA_USB_INIT_TETH_PROT:
+ if (state == IPA_USB_INVALID ||
+ (!is_dpl && state == IPA_USB_INITIALIZED))
+ is_legal = true;
+ break;
+ case IPA_USB_REQUEST_CHANNEL:
+ if (state == IPA_USB_INITIALIZED)
+ is_legal = true;
+ break;
+ case IPA_USB_CONNECT:
+ if (state == IPA_USB_INITIALIZED || state == IPA_USB_STOPPED)
+ is_legal = true;
+ break;
+ case IPA_USB_DISCONNECT:
+ if (state == IPA_USB_CONNECTED ||
+ state == IPA_USB_SUSPEND_IN_PROGRESS ||
+ state == IPA_USB_SUSPENDED)
+ is_legal = true;
+ break;
+ case IPA_USB_RELEASE_CHANNEL:
+ /* when releasing the 1st channel, the state has already changed */
+ if (state == IPA_USB_STOPPED ||
+ (!is_dpl && state == IPA_USB_INITIALIZED))
+ is_legal = true;
+ break;
+ case IPA_USB_DEINIT_TETH_PROT:
+ /*
+ * For data tethering, deinit of an initialized protocol is
+ * always allowed. E.g. rmnet is initialized while rndis is
+ * connected: USB may deinit rmnet first and then disconnect
+ * rndis on cable disconnect.
+ */
+ if (!is_dpl || state == IPA_USB_INITIALIZED)
+ is_legal = true;
+ break;
+ case IPA_USB_SUSPEND:
+ if (state == IPA_USB_CONNECTED)
+ is_legal = true;
+ break;
+ case IPA_USB_RESUME:
+ if (state == IPA_USB_SUSPENDED ||
+ state == IPA_USB_SUSPEND_IN_PROGRESS)
+ is_legal = true;
+ break;
+ default:
+ is_legal = false;
+ break;
+ }
+
+ if (!is_legal) {
+ IPA_USB_ERR("Illegal %s operation: state=%s operation=%s\n",
+ is_dpl ? "DPL" : "",
+ ipa3_usb_state_to_string(state),
+ ipa3_usb_op_to_string(op));
+ }
+
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ return is_legal;
+}
+
+static void ipa3_usb_notify_do(enum ipa3_usb_transport_type ttype,
+ enum ipa_usb_notify_event event)
+{
+ int (*cb)(enum ipa_usb_notify_event, void *user_data);
+ void *user_data;
+ int res;
+
+ IPA_USB_DBG("Trying to notify USB with %s\n",
+ ipa3_usb_notify_event_to_string(event));
+
+ cb = ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb;
+ user_data = ipa3_usb_ctx->ttype_ctx[ttype].user_data;
+
+ if (cb) {
+ res = cb(event, user_data);
+ IPA_USB_DBG("Notified USB with %s. is_dpl=%d result=%d\n",
+ ipa3_usb_notify_event_to_string(event),
+ IPA3_USB_IS_TTYPE_DPL(ttype), res);
+ }
+}
+
+/*
+ * This callback is called from the ECM or RNDIS drivers.
+ * Both are data tethering drivers, not DPL.
+ */
+void ipa3_usb_device_ready_notify_cb(void)
+{
+ IPA_USB_DBG_LOW("entry\n");
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH,
+ IPA_USB_DEVICE_READY);
+ IPA_USB_DBG_LOW("exit\n");
+}
+
+static void ipa3_usb_prod_notify_cb_do(enum ipa_rm_event event,
+ enum ipa3_usb_transport_type ttype)
+{
+ struct ipa3_usb_rm_context *rm_ctx;
+
+ IPA_USB_DBG_LOW("entry\n");
+
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ IPA_USB_DBG(":%s granted\n",
+ ipa_rm_resource_str(rm_ctx->prod_params.name));
+ complete_all(&rm_ctx->prod_comp);
+ break;
+ case IPA_RM_RESOURCE_RELEASED:
+ IPA_USB_DBG(":%s released\n",
+ ipa_rm_resource_str(rm_ctx->prod_params.name));
+ complete_all(&rm_ctx->prod_comp);
+ break;
+ }
+ IPA_USB_DBG_LOW("exit\n");
+}
+
+static void ipa3_usb_prod_notify_cb(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH);
+}
+
+static void ipa3_usb_dpl_dummy_prod_notify_cb(void *user_data,
+ enum ipa_rm_event event, unsigned long data)
+{
+ ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_DPL);
+}
+
+static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work)
+{
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_REMOTE_WAKEUP);
+}
+
+static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work)
+{
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_REMOTE_WAKEUP);
+}
+
+static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work)
+{
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_SUSPEND_COMPLETED);
+}
+
+static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work)
+{
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_SUSPEND_COMPLETED);
+}
+
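+/*
+ * Complete a pending suspend from the workqueue: stop the DL/DPL
+ * channel, or on failure resume the channels, revert the state to
+ * CONNECTED and notify USB of a remote wakeup.
+ */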
+static void ipa3_usb_wq_finish_suspend_work(struct work_struct *work)
+{
+ struct finish_suspend_work_context *finish_suspend_work_ctx;
+ unsigned long flags;
+ int result = -EFAULT;
+ struct ipa3_usb_transport_type_ctx *tctx;
+
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+ IPA_USB_DBG_LOW("entry\n");
+ finish_suspend_work_ctx = container_of(work,
+ struct finish_suspend_work_context, work);
+ tctx = &ipa3_usb_ctx->ttype_ctx[finish_suspend_work_ctx->ttype];
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ if (tctx->state != IPA_USB_SUSPEND_IN_PROGRESS) {
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return;
+ }
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+ /* Stop DL/DPL channel */
+ result = ipa3_stop_gsi_channel(finish_suspend_work_ctx->dl_clnt_hdl);
+ if (result) {
+ IPAERR("Error stopping DL/DPL channel: %d, resuming channel\n",
+ result);
+ ipa3_xdci_resume(finish_suspend_work_ctx->ul_clnt_hdl,
+ finish_suspend_work_ctx->dl_clnt_hdl,
+ IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype));
+ /* Change state back to CONNECTED */
+ if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true,
+ finish_suspend_work_ctx->ttype))
+ IPA_USB_ERR("failed to change state to connected\n");
+ queue_work(ipa3_usb_ctx->wq,
+ IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
+ &ipa3_usb_dpl_notify_remote_wakeup_work :
+ &ipa3_usb_notify_remote_wakeup_work);
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return;
+ }
+
+ /* Change ipa_usb state to SUSPENDED */
+ if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false,
+ finish_suspend_work_ctx->ttype))
+ IPA_USB_ERR("failed to change state to suspended\n");
+
+ queue_work(ipa3_usb_ctx->wq,
+ IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
+ &ipa3_usb_dpl_notify_suspend_completed_work :
+ &ipa3_usb_notify_suspend_completed_work);
+
+ IPA_USB_DBG_LOW("exit\n");
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+}
+
+static int ipa3_usb_cons_request_resource_cb_do(
+ enum ipa3_usb_transport_type ttype,
+ struct work_struct *remote_wakeup_work)
+{
+ struct ipa3_usb_rm_context *rm_ctx;
+ unsigned long flags;
+ int result;
+
+ IPA_USB_DBG_LOW("entry\n");
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ IPA_USB_DBG("state is %s\n",
+ ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[ttype].state));
+ switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
+ case IPA_USB_CONNECTED:
+ rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
+ result = 0;
+ break;
+ case IPA_USB_SUSPEND_REQUESTED:
+ rm_ctx->cons_requested = true;
+ if (rm_ctx->cons_state == IPA_USB_CONS_GRANTED)
+ result = 0;
+ else
+ result = -EINPROGRESS;
+ break;
+ case IPA_USB_SUSPEND_IN_PROGRESS:
+ /*
+ * This case happens due to suspend interrupt.
+ * CONS is granted
+ */
+ if (!rm_ctx->cons_requested) {
+ rm_ctx->cons_requested = true;
+ queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
+ }
+ result = 0;
+ break;
+ case IPA_USB_SUSPENDED:
+ if (!rm_ctx->cons_requested) {
+ rm_ctx->cons_requested = true;
+ queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
+ }
+ result = -EINPROGRESS;
+ break;
+ default:
+ rm_ctx->cons_requested = true;
+ result = -EINPROGRESS;
+ break;
+ }
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ IPA_USB_DBG_LOW("exit with %d\n", result);
+ return result;
+}
+
+static int ipa3_usb_cons_request_resource_cb(void)
+{
+ return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_TETH,
+ &ipa3_usb_notify_remote_wakeup_work);
+}
+
+static int ipa3_usb_dpl_cons_request_resource_cb(void)
+{
+ return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_DPL,
+ &ipa3_usb_dpl_notify_remote_wakeup_work);
+}
+
+static int ipa3_usb_cons_release_resource_cb_do(
+ enum ipa3_usb_transport_type ttype)
+{
+ unsigned long flags;
+ struct ipa3_usb_rm_context *rm_ctx;
+
+ IPA_USB_DBG_LOW("entry\n");
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ IPA_USB_DBG("state is %s\n",
+ ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[ttype].state));
+ switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
+ case IPA_USB_SUSPEND_IN_PROGRESS:
+ /* Proceed with the suspend if no DL/DPL data */
+ if (rm_ctx->cons_requested)
+ rm_ctx->cons_requested_released = true;
+ else {
+ queue_work(ipa3_usb_ctx->wq,
+ &ipa3_usb_ctx->ttype_ctx[ttype].
+ finish_suspend_work.work);
+ }
+ break;
+ case IPA_USB_SUSPEND_REQUESTED:
+ if (rm_ctx->cons_requested)
+ rm_ctx->cons_requested_released = true;
+ break;
+ case IPA_USB_STOPPED:
+ case IPA_USB_RESUME_IN_PROGRESS:
+ if (rm_ctx->cons_requested)
+ rm_ctx->cons_requested = false;
+ break;
+ case IPA_USB_CONNECTED:
+ case IPA_USB_INITIALIZED:
+ break;
+ default:
+ IPA_USB_ERR("received cons_release_cb in bad state: %s!\n",
+ ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[ttype].state));
+ WARN_ON(1);
+ break;
+ }
+
+ rm_ctx->cons_state = IPA_USB_CONS_RELEASED;
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ IPA_USB_DBG_LOW("exit\n");
+ return 0;
+}
+
+static int ipa3_usb_cons_release_resource_cb(void)
+{
+ return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_TETH);
+}
+
+static int ipa3_usb_dpl_cons_release_resource_cb(void)
+{
+ return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_DPL);
+}
+
+static char *ipa3_usb_teth_prot_to_string(enum ipa_usb_teth_prot teth_prot)
+{
+ switch (teth_prot) {
+ case IPA_USB_RNDIS:
+ return "rndis_ipa";
+ case IPA_USB_ECM:
+ return "ecm_ipa";
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ return "teth_bridge";
+ case IPA_USB_DIAG:
+ return "dpl";
+ default:
+ break;
+ }
+
+ return "unsupported";
+}
+
+static char *ipa3_usb_teth_bridge_prot_to_string(
+ enum ipa_usb_teth_prot teth_prot)
+{
+ switch (teth_prot) {
+ case IPA_USB_RMNET:
+ return "rmnet";
+ case IPA_USB_MBIM:
+ return "mbim";
+ default:
+ break;
+ }
+
+ return "unsupported";
+}
+
+static int ipa3_usb_init_teth_bridge(void)
+{
+ int result;
+
+ result = teth_bridge_init(&ipa3_usb_ctx->teth_bridge_params);
+ if (result) {
+ IPA_USB_ERR("Failed to initialize teth_bridge.\n");
+ return result;
+ }
+
+ return 0;
+}
+
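+/* Create the PROD and CONS IPA RM resources for the transport type */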
+static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype)
+{
+ struct ipa3_usb_rm_context *rm_ctx;
+ int result = -EFAULT;
+ bool created = false;
+
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+
+ /* create PROD */
+ if (!rm_ctx->prod_valid) {
+ rm_ctx->prod_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD :
+ IPA_RM_RESOURCE_USB_PROD;
+ rm_ctx->prod_params.floor_voltage = IPA_VOLTAGE_SVS;
+ rm_ctx->prod_params.reg_params.user_data = NULL;
+ rm_ctx->prod_params.reg_params.notify_cb =
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ ipa3_usb_dpl_dummy_prod_notify_cb :
+ ipa3_usb_prod_notify_cb;
+ rm_ctx->prod_params.request_resource = NULL;
+ rm_ctx->prod_params.release_resource = NULL;
+ result = ipa_rm_create_resource(&rm_ctx->prod_params);
+ if (result) {
+ IPA_USB_ERR("Failed to create %s RM resource\n",
+ ipa_rm_resource_str(rm_ctx->prod_params.name));
+ return result;
+ }
+ rm_ctx->prod_valid = true;
+ created = true;
+ IPA_USB_DBG("Created %s RM resource\n",
+ ipa_rm_resource_str(rm_ctx->prod_params.name));
+ }
+
+ /* Create CONS */
+ if (!rm_ctx->cons_valid) {
+ rm_ctx->cons_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ IPA_RM_RESOURCE_USB_DPL_CONS :
+ IPA_RM_RESOURCE_USB_CONS;
+ rm_ctx->cons_params.floor_voltage = IPA_VOLTAGE_SVS;
+ rm_ctx->cons_params.reg_params.user_data = NULL;
+ rm_ctx->cons_params.reg_params.notify_cb = NULL;
+ rm_ctx->cons_params.request_resource =
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ ipa3_usb_dpl_cons_request_resource_cb :
+ ipa3_usb_cons_request_resource_cb;
+ rm_ctx->cons_params.release_resource =
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ ipa3_usb_dpl_cons_release_resource_cb :
+ ipa3_usb_cons_release_resource_cb;
+ result = ipa_rm_create_resource(&rm_ctx->cons_params);
+ if (result) {
+ IPA_USB_ERR("Failed to create %s RM resource\n",
+ ipa_rm_resource_str(rm_ctx->cons_params.name));
+ goto create_cons_rsc_fail;
+ }
+ rm_ctx->cons_valid = true;
+ IPA_USB_DBG("Created %s RM resource\n",
+ ipa_rm_resource_str(rm_ctx->cons_params.name));
+ }
+
+ return 0;
+
+create_cons_rsc_fail:
+ if (created) {
+ rm_ctx->prod_valid = false;
+ ipa_rm_delete_resource(rm_ctx->prod_params.name);
+ }
+ return result;
+}
+
+int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+ struct ipa_usb_teth_params *teth_params,
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+ void *),
+ void *user_data)
+{
+ int result = -EFAULT;
+ enum ipa3_usb_transport_type ttype;
+
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+ IPA_USB_DBG_LOW("entry\n");
+ if (teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE ||
+ ((teth_prot == IPA_USB_RNDIS || teth_prot == IPA_USB_ECM) &&
+ teth_params == NULL) || ipa_usb_notify_cb == NULL ||
+ user_data == NULL) {
+ IPA_USB_ERR("bad parameters.\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_INIT_TETH_PROT, ttype)) {
+ IPA_USB_ERR("Illegal operation.\n");
+ result = -EPERM;
+ goto bad_params;
+ }
+
+ /* Create IPA RM USB resources */
+ result = ipa3_usb_create_rm_resources(ttype);
+ if (result) {
+ IPA_USB_ERR("Failed creating IPA RM USB resources\n");
+ goto bad_params;
+ }
+
+ if (!ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb) {
+ ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb =
+ ipa_usb_notify_cb;
+ } else if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ if (ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb !=
+ ipa_usb_notify_cb) {
+ IPA_USB_ERR("Got different notify_cb\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
+ } else {
+ IPA_USB_ERR("Already has dpl_notify_cb\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ /* Initialize tethering protocol */
+ switch (teth_prot) {
+ case IPA_USB_RNDIS:
+ case IPA_USB_ECM:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_INVALID) {
+ IPA_USB_DBG("%s already initialized\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ result = -EPERM;
+ goto bad_params;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+ if (teth_prot == IPA_USB_RNDIS) {
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.rndis.device_ready_notify =
+ ipa3_usb_device_ready_notify_cb;
+ memcpy(ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.rndis.host_ethaddr,
+ teth_params->host_ethaddr,
+ sizeof(teth_params->host_ethaddr));
+ memcpy(ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.rndis.device_ethaddr,
+ teth_params->device_ethaddr,
+ sizeof(teth_params->device_ethaddr));
+
+ result = rndis_ipa_init(&ipa3_usb_ctx->
+ teth_prot_ctx[teth_prot].
+ teth_prot_params.rndis);
+ if (result) {
+ IPA_USB_ERR("Failed to initialize %s\n",
+ ipa3_usb_teth_prot_to_string(
+ teth_prot));
+ goto teth_prot_init_fail;
+ }
+ } else {
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.ecm.device_ready_notify =
+ ipa3_usb_device_ready_notify_cb;
+ memcpy(ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.ecm.host_ethaddr,
+ teth_params->host_ethaddr,
+ sizeof(teth_params->host_ethaddr));
+ memcpy(ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.ecm.device_ethaddr,
+ teth_params->device_ethaddr,
+ sizeof(teth_params->device_ethaddr));
+
+ result = ecm_ipa_init(&ipa3_usb_ctx->
+ teth_prot_ctx[teth_prot].teth_prot_params.ecm);
+ if (result) {
+ IPA_USB_ERR("Failed to initialize %s\n",
+ ipa3_usb_teth_prot_to_string(
+ teth_prot));
+ goto teth_prot_init_fail;
+ }
+ }
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INITIALIZED;
+ ipa3_usb_ctx->num_init_prot++;
+ IPA_USB_DBG("initialized %s\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ break;
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_INVALID) {
+ IPA_USB_DBG("%s already initialized\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ result = -EPERM;
+ goto bad_params;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+ result = ipa3_usb_init_teth_bridge();
+ if (result)
+ goto teth_prot_init_fail;
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INITIALIZED;
+ ipa3_usb_ctx->num_init_prot++;
+ IPA_USB_DBG("initialized %s %s\n",
+ ipa3_usb_teth_prot_to_string(teth_prot),
+ ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+ break;
+ case IPA_USB_DIAG:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_INVALID) {
+ IPA_USB_DBG("DPL already initialized\n");
+ result = -EPERM;
+ goto bad_params;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INITIALIZED;
+ IPA_USB_DBG("initialized DPL\n");
+ break;
+ default:
+ IPA_USB_ERR("unexpected tethering protocol\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
+ IPA_USB_ERR("failed to change state to initialized\n");
+
+ IPA_USB_DBG_LOW("exit\n");
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return 0;
+
+teth_prot_init_fail:
+ if ((IPA3_USB_IS_TTYPE_DPL(ttype))
+ || (ipa3_usb_ctx->num_init_prot == 0)) {
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false;
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false;
+ ipa_rm_delete_resource(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
+ ipa_rm_delete_resource(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
+ }
+bad_params:
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return result;
+}
+EXPORT_SYMBOL(ipa_usb_init_teth_prot);
+
+void ipa3_usb_gsi_evt_err_cb(struct gsi_evt_err_notify *notify)
+{
+ IPA_USB_DBG_LOW("entry\n");
+ if (!notify)
+ return;
+ IPA_USB_ERR("Received event error %d, description: %d\n",
+ notify->evt_id, notify->err_desc);
+ IPA_USB_DBG_LOW("exit\n");
+}
+
+void ipa3_usb_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+ IPA_USB_DBG_LOW("entry\n");
+ if (!notify)
+ return;
+ IPA_USB_ERR("Received channel error %d, description: %d\n",
+ notify->evt_id, notify->err_desc);
+ IPA_USB_DBG_LOW("exit\n");
+}
+
+static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params)
+{
+ IPA_USB_DBG_LOW("gevntcount_low_addr = %x\n",
+ params->gevntcount_low_addr);
+ IPA_USB_DBG_LOW("gevntcount_hi_addr = %x\n",
+ params->gevntcount_hi_addr);
+ IPA_USB_DBG_LOW("dir = %d\n", params->dir);
+ IPA_USB_DBG_LOW("xfer_ring_len = %d\n", params->xfer_ring_len);
+ IPA_USB_DBG_LOW("xfer_ring_base_addr = %llx\n",
+ params->xfer_ring_base_addr);
+ IPA_USB_DBG_LOW("last_trb_addr_iova = %x\n",
+ params->xfer_scratch.last_trb_addr_iova);
+ IPA_USB_DBG_LOW("const_buffer_size = %d\n",
+ params->xfer_scratch.const_buffer_size);
+ IPA_USB_DBG_LOW("depcmd_low_addr = %x\n",
+ params->xfer_scratch.depcmd_low_addr);
+ IPA_USB_DBG_LOW("depcmd_hi_addr = %x\n",
+ params->xfer_scratch.depcmd_hi_addr);
+
+ if (params->client >= IPA_CLIENT_MAX ||
+ params->teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE ||
+ params->xfer_ring_len % GSI_CHAN_RE_SIZE_16B ||
+ params->xfer_scratch.const_buffer_size < 1 ||
+ params->xfer_scratch.const_buffer_size > 31) {
+ IPA_USB_ERR("Invalid params\n");
+ return false;
+ }
+ switch (params->teth_prot) {
+ case IPA_USB_DIAG:
+ if (!IPA_CLIENT_IS_CONS(params->client)) {
+ IPA_USB_ERR("DPL supports only DL channel\n");
+ return false;
+ }
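+ /* fall through - DIAG state is validated together with RNDIS/ECM */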
+ case IPA_USB_RNDIS:
+ case IPA_USB_ECM:
+ if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+ IPA_USB_TETH_PROT_INVALID) {
+ IPA_USB_ERR("%s is not initialized\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ return false;
+ }
+ break;
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+ IPA_USB_TETH_PROT_INVALID) {
+ IPA_USB_ERR("%s is not initialized\n",
+ ipa3_usb_teth_bridge_prot_to_string(
+ params->teth_prot));
+ return false;
+ }
+ break;
+ default:
+ IPA_USB_ERR("Unknown tethering protocol (%d)\n",
+ params->teth_prot);
+ return false;
+ }
+ return true;
+}
+
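+/*
+ * Map or unmap the xDCI channel resources for SMMU: the one-page USB
+ * register region (shared and refcounted across channels), the
+ * transfer ring and the TRB data buffers.
+ */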
+static int ipa3_usb_smmu_map_xdci_channel(
+ struct ipa_usb_xdci_chan_params *params, bool map)
+{
+ int result;
+ u32 gevntcount_r = rounddown(params->gevntcount_low_addr, PAGE_SIZE);
+ u32 xfer_scratch_r =
+ rounddown(params->xfer_scratch.depcmd_low_addr, PAGE_SIZE);
+
+ if (gevntcount_r != xfer_scratch_r) {
+ IPA_USB_ERR("No support more than 1 page map for USB regs\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (map) {
+ if (ipa3_usb_ctx->smmu_reg_map.cnt == 0) {
+ ipa3_usb_ctx->smmu_reg_map.addr = gevntcount_r;
+ result = ipa3_smmu_map_peer_reg(
+ ipa3_usb_ctx->smmu_reg_map.addr, true);
+ if (result) {
+ IPA_USB_ERR("failed to map USB regs %d\n",
+ result);
+ return result;
+ }
+ } else {
+ if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) {
+ IPA_USB_ERR(
+ "cannot map a different USB reg region\n");
+ return -EINVAL;
+ }
+ }
+ ipa3_usb_ctx->smmu_reg_map.cnt++;
+ } else {
+ if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) {
+ IPA_USB_ERR(
+ "cannot map a different USB reg region\n");
+ return -EINVAL;
+ }
+
+ if (ipa3_usb_ctx->smmu_reg_map.cnt == 1) {
+ result = ipa3_smmu_map_peer_reg(
+ ipa3_usb_ctx->smmu_reg_map.addr, false);
+ if (result) {
+ IPA_USB_ERR("failed to unmap USB regs %d\n",
+ result);
+ return result;
+ }
+ }
+ ipa3_usb_ctx->smmu_reg_map.cnt--;
+ }
+
+ result = ipa3_smmu_map_peer_buff(params->xfer_ring_base_addr_iova,
+ params->xfer_ring_base_addr, params->xfer_ring_len, map);
+ if (result) {
+ IPA_USB_ERR("failed to map Xfer ring %d\n", result);
+ return result;
+ }
+
+ result = ipa3_smmu_map_peer_buff(params->data_buff_base_addr_iova,
+ params->data_buff_base_addr, params->data_buff_base_len, map);
+ if (result) {
+ IPA_USB_ERR("failed to map TRBs buff %d\n", result);
+ return result;
+ }
+
+ return 0;
+}
+
+static int ipa3_usb_request_xdci_channel(
+ struct ipa_usb_xdci_chan_params *params,
+ struct ipa_req_chan_out_params *out_params)
+{
+ int result = -EFAULT;
+ struct ipa_request_gsi_channel_params chan_params;
+ enum ipa3_usb_transport_type ttype;
+
+ IPA_USB_DBG_LOW("entry\n");
+ if (params == NULL || out_params == NULL ||
+ !ipa3_usb_check_chan_params(params)) {
+ IPA_USB_ERR("bad parameters\n");
+ return -EINVAL;
+ }
+
+ ttype = IPA3_USB_GET_TTYPE(params->teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_REQUEST_CHANNEL, ttype)) {
+ IPA_USB_ERR("Illegal operation\n");
+ return -EPERM;
+ }
+
+ memset(&chan_params, 0, sizeof(struct ipa_request_gsi_channel_params));
+ memcpy(&chan_params.ipa_ep_cfg, &params->ipa_ep_cfg,
+ sizeof(struct ipa_ep_cfg));
+ chan_params.client = params->client;
+ switch (params->teth_prot) {
+ case IPA_USB_RNDIS:
+ chan_params.priv = ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+ teth_prot_params.rndis.private;
+ if (params->dir == GSI_CHAN_DIR_FROM_GSI)
+ chan_params.notify =
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+ teth_prot_params.rndis.ipa_tx_notify;
+ else
+ chan_params.notify =
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+ teth_prot_params.rndis.ipa_rx_notify;
+ chan_params.skip_ep_cfg =
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+ teth_prot_params.rndis.skip_ep_cfg;
+ break;
+ case IPA_USB_ECM:
+ chan_params.priv = ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+ teth_prot_params.ecm.private;
+ if (params->dir == GSI_CHAN_DIR_FROM_GSI)
+ chan_params.notify =
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+ teth_prot_params.ecm.ecm_ipa_tx_dp_notify;
+ else
+ chan_params.notify =
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+ teth_prot_params.ecm.ecm_ipa_rx_dp_notify;
+ chan_params.skip_ep_cfg =
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+ teth_prot_params.ecm.skip_ep_cfg;
+ break;
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ chan_params.priv =
+ ipa3_usb_ctx->teth_bridge_params.private_data;
+ chan_params.notify =
+ ipa3_usb_ctx->teth_bridge_params.usb_notify_cb;
+ chan_params.skip_ep_cfg =
+ ipa3_usb_ctx->teth_bridge_params.skip_ep_cfg;
+ break;
+ case IPA_USB_DIAG:
+ chan_params.priv = NULL;
+ chan_params.notify = NULL;
+ chan_params.skip_ep_cfg = true;
+ break;
+ default:
+ break;
+ }
+
+ result = ipa3_usb_smmu_map_xdci_channel(params, true);
+ if (result) {
+ IPA_USB_ERR("failed to smmu map %d\n", result);
+ return result;
+ }
+
+ /* store channel params for SMMU unmap */
+ ipa3_usb_ctx->ttype_ctx[ttype].ch_params = *params;
+
+ chan_params.keep_ipa_awake = params->keep_ipa_awake;
+ chan_params.evt_ring_params.intf = GSI_EVT_CHTYPE_XDCI_EV;
+ chan_params.evt_ring_params.intr = GSI_INTR_IRQ;
+ chan_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B;
+ chan_params.evt_ring_params.ring_len = params->xfer_ring_len -
+ chan_params.evt_ring_params.re_size;
+ chan_params.evt_ring_params.ring_base_addr =
+ params->xfer_ring_base_addr;
+ chan_params.evt_ring_params.ring_base_vaddr = NULL;
+ chan_params.evt_ring_params.int_modt = 0;
+ chan_params.evt_ring_params.int_modc = 0;
+ chan_params.evt_ring_params.intvec = 0;
+ chan_params.evt_ring_params.msi_addr = 0;
+ chan_params.evt_ring_params.rp_update_addr = 0;
+ chan_params.evt_ring_params.exclusive = true;
+ chan_params.evt_ring_params.err_cb = ipa3_usb_gsi_evt_err_cb;
+ chan_params.evt_ring_params.user_data = NULL;
+ chan_params.evt_scratch.xdci.gevntcount_low_addr =
+ params->gevntcount_low_addr;
+ chan_params.evt_scratch.xdci.gevntcount_hi_addr =
+ params->gevntcount_hi_addr;
+ chan_params.chan_params.prot = GSI_CHAN_PROT_XDCI;
+ chan_params.chan_params.dir = params->dir;
+ /* chan_id is set in ipa3_request_gsi_channel() */
+ chan_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
+ chan_params.chan_params.ring_len = params->xfer_ring_len;
+ chan_params.chan_params.ring_base_addr =
+ params->xfer_ring_base_addr;
+ chan_params.chan_params.ring_base_vaddr = NULL;
+ chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE;
+ chan_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
+ if (params->dir == GSI_CHAN_DIR_FROM_GSI)
+ chan_params.chan_params.low_weight =
+ IPA_USB_DL_CHAN_LOW_WEIGHT;
+ else
+ chan_params.chan_params.low_weight =
+ IPA_USB_UL_CHAN_LOW_WEIGHT;
+ chan_params.chan_params.xfer_cb = NULL;
+ chan_params.chan_params.err_cb = ipa3_usb_gsi_chan_err_cb;
+ chan_params.chan_params.chan_user_data = NULL;
+ chan_params.chan_scratch.xdci.last_trb_addr =
+ params->xfer_scratch.last_trb_addr_iova;
+ /* xferrscidx will be updated later */
+ chan_params.chan_scratch.xdci.xferrscidx = 0;
+ chan_params.chan_scratch.xdci.const_buffer_size =
+ params->xfer_scratch.const_buffer_size;
+ chan_params.chan_scratch.xdci.depcmd_low_addr =
+ params->xfer_scratch.depcmd_low_addr;
+ chan_params.chan_scratch.xdci.depcmd_hi_addr =
+ params->xfer_scratch.depcmd_hi_addr;
+ chan_params.chan_scratch.xdci.outstanding_threshold =
+ ((params->teth_prot == IPA_USB_MBIM) ? 1 : 2) *
+ chan_params.chan_params.re_size;
+ /* max_outstanding_tre is set in ipa3_request_gsi_channel() */
+ result = ipa3_request_gsi_channel(&chan_params, out_params);
+ if (result) {
+ IPA_USB_ERR("failed to allocate GSI channel\n");
+ ipa3_usb_smmu_map_xdci_channel(params, false);
+ return result;
+ }
+
+ IPA_USB_DBG_LOW("exit\n");
+ return 0;
+}
+
+static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
+ enum ipa3_usb_transport_type ttype)
+{
+ int result = 0;
+
+ IPA_USB_DBG_LOW("entry\n");
+ if (ttype >= IPA_USB_TRANSPORT_MAX) {
+ IPA_USB_ERR("bad parameter.\n");
+ return -EINVAL;
+ }
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_RELEASE_CHANNEL, ttype)) {
+ IPA_USB_ERR("Illegal operation.\n");
+ return -EPERM;
+ }
+
+ /* Release channel */
+ result = ipa3_release_gsi_channel(clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to deallocate channel.\n");
+ return result;
+ }
+
+ result = ipa3_usb_smmu_map_xdci_channel(
+ &ipa3_usb_ctx->ttype_ctx[ttype].ch_params, false);
+
+ /* Change ipa_usb state to INITIALIZED */
+ if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
+ IPA_USB_ERR("failed to change state to initialized\n");
+
+ IPA_USB_DBG_LOW("exit\n");
+ return 0;
+}
+
+static int ipa3_usb_request_prod(enum ipa3_usb_transport_type ttype)
+{
+ int result;
+ struct ipa3_usb_rm_context *rm_ctx;
+ const char *rsrc_str;
+
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+ rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
+
+ IPA_USB_DBG_LOW("requesting %s\n", rsrc_str);
+ init_completion(&rm_ctx->prod_comp);
+ result = ipa_rm_request_resource(rm_ctx->prod_params.name);
+ if (result) {
+ if (result != -EINPROGRESS) {
+ IPA_USB_ERR("failed to request %s: %d\n",
+ rsrc_str, result);
+ return result;
+ }
+ result = wait_for_completion_timeout(&rm_ctx->prod_comp,
+ msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
+ if (result == 0) {
+ IPA_USB_ERR("timeout request %s\n", rsrc_str);
+ return -ETIME;
+ }
+ }
+
+ IPA_USB_DBG_LOW("%s granted\n", rsrc_str);
+ return 0;
+}
+
+static int ipa3_usb_release_prod(enum ipa3_usb_transport_type ttype)
+{
+ int result;
+ struct ipa3_usb_rm_context *rm_ctx;
+ const char *rsrc_str;
+
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+ rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
+
+ IPA_USB_DBG_LOW("releasing %s\n", rsrc_str);
+
+ init_completion(&rm_ctx->prod_comp);
+ result = ipa_rm_release_resource(rm_ctx->prod_params.name);
+ if (result) {
+ if (result != -EINPROGRESS) {
+ IPA_USB_ERR("failed to release %s: %d\n",
+ rsrc_str, result);
+ return result;
+ }
+ result = wait_for_completion_timeout(&rm_ctx->prod_comp,
+ msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
+ if (result == 0) {
+ IPA_USB_ERR("timeout release %s\n", rsrc_str);
+ return -ETIME;
+ }
+ }
+
+ IPA_USB_DBG_LOW("%s released\n", rsrc_str);
+ return 0;
+}
+
+static bool ipa3_usb_check_connect_params(
+ struct ipa_usb_xdci_connect_params_internal *params)
+{
+ IPA_USB_DBG_LOW("ul xferrscidx = %d\n", params->usb_to_ipa_xferrscidx);
+ IPA_USB_DBG_LOW("dl xferrscidx = %d\n", params->ipa_to_usb_xferrscidx);
+ IPA_USB_DBG_LOW("max_supported_bandwidth_mbps = %d\n",
+ params->max_supported_bandwidth_mbps);
+
+ if (params->max_pkt_size < IPA_USB_HIGH_SPEED_512B ||
+ params->max_pkt_size > IPA_USB_SUPER_SPEED_1024B ||
+ params->ipa_to_usb_xferrscidx < 0 ||
+ params->ipa_to_usb_xferrscidx > 127 ||
+ (params->teth_prot != IPA_USB_DIAG &&
+ (params->usb_to_ipa_xferrscidx < 0 ||
+ params->usb_to_ipa_xferrscidx > 127)) ||
+ params->teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+ IPA_USB_ERR("Invalid params\n");
+ return false;
+ }
+
+ if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+ IPA_USB_TETH_PROT_INVALID) {
+ IPA_USB_ERR("%s is not initialized\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ return false;
+ }
+
+ return true;
+}
+
+static int ipa3_usb_connect_teth_bridge(
+ struct teth_bridge_connect_params *params)
+{
+ int result;
+
+ result = teth_bridge_connect(params);
+ if (result) {
+ IPA_USB_ERR("failed to connect teth_bridge (%s)\n",
+ params->tethering_mode == TETH_TETHERING_MODE_RMNET ?
+ "rmnet" : "mbim");
+ return result;
+ }
+
+ return 0;
+}
+
+static int ipa3_usb_connect_dpl(void)
+{
+ int res = 0;
+
+ /*
+ * Add DPL dependency to the RM dependency graph. The first
+ * add_dependency call is synchronous to make sure the IPA
+ * clocks are up before we notify the USB driver that it may
+ * continue.
+ */
+ res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res < 0) {
+ IPA_USB_ERR("ipa_rm_add_dependency_sync() failed.\n");
+ return res;
+ }
+
+ /*
+ * this add_dependency call can't be sync since it will block until DPL
+ * status is connected (which can happen only later in the flow),
+ * the clocks are already up so the call doesn't need to block.
+ */
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_USB_DPL_CONS);
+ if (res < 0 && res != -EINPROGRESS) {
+ IPA_USB_ERR("ipa_rm_add_dependency() failed.\n");
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ return res;
+ }
+
+ return 0;
+}
+
+static int ipa3_usb_connect_teth_prot(
+ struct ipa_usb_xdci_connect_params_internal *params,
+ enum ipa3_usb_transport_type ttype)
+{
+ int result;
+ struct teth_bridge_connect_params teth_bridge_params;
+
+ IPA_USB_DBG("connecting protocol = %d\n",
+ params->teth_prot);
+ switch (params->teth_prot) {
+ case IPA_USB_RNDIS:
+ if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].state ==
+ IPA_USB_TETH_PROT_CONNECTED) {
+ IPA_USB_DBG("%s is already connected.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ break;
+ }
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].user_data;
+ result = rndis_ipa_pipe_connect_notify(
+ params->usb_to_ipa_clnt_hdl,
+ params->ipa_to_usb_clnt_hdl,
+ params->teth_prot_params.max_xfer_size_bytes_to_dev,
+ params->teth_prot_params.max_packet_number_to_dev,
+ params->teth_prot_params.max_xfer_size_bytes_to_host,
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+ teth_prot_params.rndis.private);
+ if (result) {
+ IPA_USB_ERR("failed to connect %s.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+ return result;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].state =
+ IPA_USB_TETH_PROT_CONNECTED;
+ IPA_USB_DBG("%s is connected.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ break;
+ case IPA_USB_ECM:
+ if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].state ==
+ IPA_USB_TETH_PROT_CONNECTED) {
+ IPA_USB_DBG("%s is already connected.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ break;
+ }
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].user_data;
+ result = ecm_ipa_connect(params->usb_to_ipa_clnt_hdl,
+ params->ipa_to_usb_clnt_hdl,
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+ teth_prot_params.ecm.private);
+ if (result) {
+ IPA_USB_ERR("failed to connect %s.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+ return result;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].state =
+ IPA_USB_TETH_PROT_CONNECTED;
+ IPA_USB_DBG("%s is connected.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ break;
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+ IPA_USB_TETH_PROT_CONNECTED) {
+ IPA_USB_DBG("%s is already connected.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ break;
+ }
+ result = ipa3_usb_init_teth_bridge();
+ if (result)
+ return result;
+
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+ ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].
+ user_data;
+ teth_bridge_params.ipa_usb_pipe_hdl =
+ params->ipa_to_usb_clnt_hdl;
+ teth_bridge_params.usb_ipa_pipe_hdl =
+ params->usb_to_ipa_clnt_hdl;
+ teth_bridge_params.tethering_mode =
+ (params->teth_prot == IPA_USB_RMNET) ?
+ (TETH_TETHERING_MODE_RMNET):(TETH_TETHERING_MODE_MBIM);
+ teth_bridge_params.client_type = IPA_CLIENT_USB_PROD;
+ result = ipa3_usb_connect_teth_bridge(&teth_bridge_params);
+ if (result) {
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+ return result;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state =
+ IPA_USB_TETH_PROT_CONNECTED;
+ ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
+ IPA_USB_DBG("%s (%s) is connected.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot),
+ ipa3_usb_teth_bridge_prot_to_string(
+ params->teth_prot));
+ break;
+ case IPA_USB_DIAG:
+ if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state ==
+ IPA_USB_TETH_PROT_CONNECTED) {
+ IPA_USB_DBG("%s is already connected.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ break;
+ }
+
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+ ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].
+ user_data;
+ result = ipa3_usb_connect_dpl();
+ if (result) {
+ IPA_USB_ERR("Failed connecting DPL result=%d\n",
+ result);
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+ return result;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state =
+ IPA_USB_TETH_PROT_CONNECTED;
+ ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
+ IPA_USB_DBG("%s is connected.\n",
+ ipa3_usb_teth_prot_to_string(
+ params->teth_prot));
+ break;
+ default:
+ IPA_USB_ERR("Invalid tethering protocol\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int ipa3_usb_disconnect_teth_bridge(void)
+{
+ int result;
+
+ result = teth_bridge_disconnect(IPA_CLIENT_USB_PROD);
+ if (result) {
+ IPA_USB_ERR("failed to disconnect teth_bridge.\n");
+ return result;
+ }
+
+ return 0;
+}
+
+static int ipa3_usb_disconnect_dpl(void)
+{
+ int res;
+
+ /* Remove DPL RM dependency */
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res)
+ IPA_USB_ERR("deleting DPL_DUMMY_PROD rsrc dependency fail\n");
+
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_USB_DPL_CONS);
+ if (res)
+ IPA_USB_ERR("deleting DPL_CONS rsrc dependencty fail\n");
+
+ return 0;
+}
+
+static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+ int result = 0;
+ enum ipa3_usb_transport_type ttype;
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ switch (teth_prot) {
+ case IPA_USB_RNDIS:
+ case IPA_USB_ECM:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_CONNECTED) {
+ IPA_USB_DBG("%s is not connected.\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ return -EPERM;
+ }
+ if (teth_prot == IPA_USB_RNDIS) {
+ result = rndis_ipa_pipe_disconnect_notify(
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.rndis.private);
+ } else {
+ result = ecm_ipa_disconnect(
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.ecm.private);
+ }
+ if (result) {
+ IPA_USB_ERR("failed to disconnect %s.\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ break;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INITIALIZED;
+ IPA_USB_DBG("disconnected %s\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ break;
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_CONNECTED) {
+ IPA_USB_DBG("%s (%s) is not connected.\n",
+ ipa3_usb_teth_prot_to_string(teth_prot),
+ ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+ return -EPERM;
+ }
+ result = ipa3_usb_disconnect_teth_bridge();
+ if (result)
+ break;
+
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INITIALIZED;
+ IPA_USB_DBG("disconnected %s (%s)\n",
+ ipa3_usb_teth_prot_to_string(teth_prot),
+ ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+ break;
+ case IPA_USB_DIAG:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_CONNECTED) {
+ IPA_USB_DBG("%s is not connected.\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ return -EPERM;
+ }
+ result = ipa3_usb_disconnect_dpl();
+ if (result)
+ break;
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INITIALIZED;
+ IPA_USB_DBG("disconnected %s\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ break;
+ default:
+ break;
+ }
+
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+ return result;
+}
+
+static int ipa3_usb_xdci_connect_internal(
+ struct ipa_usb_xdci_connect_params_internal *params)
+{
+ int result = -EFAULT;
+ struct ipa_rm_perf_profile profile;
+ enum ipa3_usb_transport_type ttype;
+
+ IPA_USB_DBG_LOW("entry\n");
+ if (params == NULL || !ipa3_usb_check_connect_params(params)) {
+ IPA_USB_ERR("bad parameters.\n");
+ return -EINVAL;
+ }
+
+ ttype = (params->teth_prot == IPA_USB_DIAG) ? IPA_USB_TRANSPORT_DPL :
+ IPA_USB_TRANSPORT_TETH;
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_CONNECT, ttype)) {
+ IPA_USB_ERR("Illegal operation.\n");
+ return -EPERM;
+ }
+
+ /* Set EE xDCI specific scratch */
+ result = ipa3_set_usb_max_packet_size(params->max_pkt_size);
+ if (result) {
+ IPA_USB_ERR("failed setting xDCI EE scratch field\n");
+ return result;
+ }
+
+ /* Set RM PROD & CONS perf profile */
+ profile.max_supported_bandwidth_mbps =
+ params->max_supported_bandwidth_mbps;
+ result = ipa_rm_set_perf_profile(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name,
+ &profile);
+ if (result) {
+ IPA_USB_ERR("failed to set %s perf profile\n",
+ ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
+ rm_ctx.prod_params.name));
+ return result;
+ }
+ result = ipa_rm_set_perf_profile(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name,
+ &profile);
+ if (result) {
+ IPA_USB_ERR("failed to set %s perf profile\n",
+ ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
+ rm_ctx.cons_params.name));
+ return result;
+ }
+
+ /* Request PROD */
+ result = ipa3_usb_request_prod(ttype);
+ if (result)
+ return result;
+
+ if (params->teth_prot != IPA_USB_DIAG) {
+ /* Start UL channel */
+ result = ipa3_xdci_connect(params->usb_to_ipa_clnt_hdl,
+ params->usb_to_ipa_xferrscidx,
+ params->usb_to_ipa_xferrscidx_valid);
+ if (result) {
+ IPA_USB_ERR("failed to connect UL channel.\n");
+ goto connect_ul_fail;
+ }
+ }
+
+ /* Start DL/DPL channel */
+ result = ipa3_xdci_connect(params->ipa_to_usb_clnt_hdl,
+ params->ipa_to_usb_xferrscidx,
+ params->ipa_to_usb_xferrscidx_valid);
+ if (result) {
+ IPA_USB_ERR("failed to connect DL/DPL channel.\n");
+ goto connect_dl_fail;
+ }
+
+ /* Connect tethering protocol */
+ result = ipa3_usb_connect_teth_prot(params, ttype);
+ if (result) {
+ IPA_USB_ERR("failed to connect teth protocol\n");
+ goto connect_teth_prot_fail;
+ }
+
+ if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
+ IPA_USB_ERR(
+ "failed to change state to connected\n");
+ goto state_change_connected_fail;
+ }
+
+ IPA_USB_DBG_LOW("exit\n");
+ return 0;
+
+state_change_connected_fail:
+ ipa3_usb_disconnect_teth_prot(params->teth_prot);
+connect_teth_prot_fail:
+ ipa3_xdci_disconnect(params->ipa_to_usb_clnt_hdl, false, -1);
+ ipa3_reset_gsi_channel(params->ipa_to_usb_clnt_hdl);
+ ipa3_reset_gsi_event_ring(params->ipa_to_usb_clnt_hdl);
+connect_dl_fail:
+ if (params->teth_prot != IPA_USB_DIAG) {
+ ipa3_xdci_disconnect(params->usb_to_ipa_clnt_hdl, false, -1);
+ ipa3_reset_gsi_channel(params->usb_to_ipa_clnt_hdl);
+ ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl);
+ }
+connect_ul_fail:
+ ipa3_usb_release_prod(ttype);
+ return result;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static char dbg_buff[IPA_USB_MAX_MSG_LEN];
+
+static char *ipa3_usb_cons_state_to_string(enum ipa3_usb_cons_state state)
+{
+ switch (state) {
+ case IPA_USB_CONS_GRANTED:
+ return "CONS_GRANTED";
+ case IPA_USB_CONS_RELEASED:
+ return "CONS_RELEASED";
+ }
+
+ return "UNSUPPORTED";
+}
+
+static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status)
+{
+ int res;
+ int i;
+ unsigned long flags;
+
+ IPA_USB_DBG_LOW("entry\n");
+
+ if (ipa3_usb_ctx == NULL) {
+ IPA_USB_ERR("IPA USB was not inited yet\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+
+ if (!status) {
+ IPA_USB_ERR("Invalid input\n");
+ res = -EINVAL;
+ goto bail;
+ }
+
+ memset(status, 0, sizeof(struct ipa3_usb_status_dbg_info));
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ status->teth_state = ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].state);
+ status->dpl_state = ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].state);
+ if (ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].rm_ctx.cons_valid)
+ status->teth_cons_state = ipa3_usb_cons_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].
+ rm_ctx.cons_state);
+ if (ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].rm_ctx.cons_valid)
+ status->dpl_cons_state = ipa3_usb_cons_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].
+ rm_ctx.cons_state);
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+ for (i = 0 ; i < IPA_USB_MAX_TETH_PROT_SIZE ; i++) {
+ if (ipa3_usb_ctx->teth_prot_ctx[i].state ==
+ IPA_USB_TETH_PROT_INITIALIZED) {
+ if ((i == IPA_USB_RMNET) || (i == IPA_USB_MBIM))
+ status->inited_prots[status->num_init_prot++] =
+ ipa3_usb_teth_bridge_prot_to_string(i);
+ else
+ status->inited_prots[status->num_init_prot++] =
+ ipa3_usb_teth_prot_to_string(i);
+ } else if (ipa3_usb_ctx->teth_prot_ctx[i].state ==
+ IPA_USB_TETH_PROT_CONNECTED) {
+ switch (i) {
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ status->teth_connected_prot =
+ ipa3_usb_teth_bridge_prot_to_string(i);
+ break;
+ case IPA_USB_DIAG:
+ status->dpl_connected_prot =
+ ipa3_usb_teth_prot_to_string(i);
+ break;
+ default:
+ status->teth_connected_prot =
+ ipa3_usb_teth_prot_to_string(i);
+ }
+ }
+ }
+
+ res = 0;
+ IPA_USB_DBG_LOW("exit\n");
+bail:
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return res;
+}
+
+static ssize_t ipa3_read_usb_state_info(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ipa3_usb_status_dbg_info status;
+ int result;
+ int nbytes;
+ int cnt = 0;
+ int i;
+
+ result = ipa3_usb_get_status_dbg_info(&status);
+ if (result) {
+ nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN,
+ "Fail to read IPA USB status\n");
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN,
+ "Tethering Data State: %s\n"
+ "DPL State: %s\n"
+ "Protocols in Initialized State: ",
+ status.teth_state,
+ status.dpl_state);
+ cnt += nbytes;
+
+ for (i = 0 ; i < status.num_init_prot ; i++) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_USB_MAX_MSG_LEN - cnt,
+ "%s ", status.inited_prots[i]);
+ cnt += nbytes;
+ }
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ status.num_init_prot ? "\n" : "None\n");
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ "Protocols in Connected State: ");
+ cnt += nbytes;
+ if (status.teth_connected_prot) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_USB_MAX_MSG_LEN - cnt,
+ "%s ", status.teth_connected_prot);
+ cnt += nbytes;
+ }
+ if (status.dpl_connected_prot) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_USB_MAX_MSG_LEN - cnt,
+ "%s ", status.dpl_connected_prot);
+ cnt += nbytes;
+ }
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ (status.teth_connected_prot ||
+ status.dpl_connected_prot) ? "\n" : "None\n");
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ "USB Tethering Consumer State: %s\n",
+ status.teth_cons_state ?
+ status.teth_cons_state : "Invalid");
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ "DPL Consumer State: %s\n",
+ status.dpl_cons_state ? status.dpl_cons_state :
+ "Invalid");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+const struct file_operations ipa3_ipa_usb_ops = {
+ .read = ipa3_read_usb_state_info,
+};
+
+static void ipa_usb_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+
+ ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", 0);
+ if (IS_ERR(ipa3_usb_ctx->dent)) {
+ IPA_USB_ERR("fail to create folder in debug_fs.\n");
+ return;
+ }
+
+ ipa3_usb_ctx->dfile_state_info = debugfs_create_file("state_info",
+ read_only_mode, ipa3_usb_ctx->dent, 0,
+ &ipa3_ipa_usb_ops);
+ if (!ipa3_usb_ctx->dfile_state_info ||
+ IS_ERR(ipa3_usb_ctx->dfile_state_info)) {
+ IPA_USB_ERR("failed to create file for state_info\n");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ debugfs_remove_recursive(ipa3_usb_ctx->dent);
+ ipa3_usb_ctx->dent = NULL;
+}
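+
+/*
+ * With debugfs mounted at its usual location, the node created above
+ * is readable at /sys/kernel/debug/ipa_usb/state_info and is served
+ * by ipa3_read_usb_state_info().
+ */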
+
+static void ipa_usb_debugfs_remove(void)
+{
+ if (IS_ERR(ipa3_usb_ctx->dent)) {
+ IPA_USB_ERR("ipa_usb debugfs folder was not created.\n");
+ return;
+ }
+
+ debugfs_remove_recursive(ipa3_usb_ctx->dent);
+}
+#else /* CONFIG_DEBUG_FS */
+static void ipa_usb_debugfs_init(void){}
+static void ipa_usb_debugfs_remove(void){}
+#endif /* CONFIG_DEBUG_FS */
+
+
+
+int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
+ struct ipa_usb_xdci_chan_params *dl_chan_params,
+ struct ipa_req_chan_out_params *ul_out_params,
+ struct ipa_req_chan_out_params *dl_out_params,
+ struct ipa_usb_xdci_connect_params *connect_params)
+{
+ int result = -EFAULT;
+ struct ipa_usb_xdci_connect_params_internal conn_params;
+
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+ IPA_USB_DBG_LOW("entry\n");
+ if (connect_params == NULL || dl_chan_params == NULL ||
+ dl_out_params == NULL ||
+ (connect_params->teth_prot != IPA_USB_DIAG &&
+ (ul_chan_params == NULL || ul_out_params == NULL))) {
+ IPA_USB_ERR("bad parameters.\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ if (connect_params->teth_prot != IPA_USB_DIAG) {
+ result = ipa3_usb_request_xdci_channel(ul_chan_params,
+ ul_out_params);
+ if (result) {
+ IPA_USB_ERR("failed to allocate UL channel.\n");
+ goto bad_params;
+ }
+ }
+
+ result = ipa3_usb_request_xdci_channel(dl_chan_params, dl_out_params);
+ if (result) {
+ IPA_USB_ERR("failed to allocate DL/DPL channel.\n");
+ goto alloc_dl_chan_fail;
+ }
+
+ memset(&conn_params, 0,
+ sizeof(struct ipa_usb_xdci_connect_params_internal));
+ conn_params.max_pkt_size = connect_params->max_pkt_size;
+ conn_params.ipa_to_usb_clnt_hdl = dl_out_params->clnt_hdl;
+ conn_params.ipa_to_usb_xferrscidx =
+ connect_params->ipa_to_usb_xferrscidx;
+ conn_params.ipa_to_usb_xferrscidx_valid =
+ connect_params->ipa_to_usb_xferrscidx_valid;
+ if (connect_params->teth_prot != IPA_USB_DIAG) {
+ conn_params.usb_to_ipa_clnt_hdl = ul_out_params->clnt_hdl;
+ conn_params.usb_to_ipa_xferrscidx =
+ connect_params->usb_to_ipa_xferrscidx;
+ conn_params.usb_to_ipa_xferrscidx_valid =
+ connect_params->usb_to_ipa_xferrscidx_valid;
+ }
+ conn_params.teth_prot = connect_params->teth_prot;
+ conn_params.teth_prot_params = connect_params->teth_prot_params;
+ conn_params.max_supported_bandwidth_mbps =
+ connect_params->max_supported_bandwidth_mbps;
+ result = ipa3_usb_xdci_connect_internal(&conn_params);
+ if (result) {
+ IPA_USB_ERR("failed to connect.\n");
+ goto connect_fail;
+ }
+
+ IPA_USB_DBG_LOW("exit\n");
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return 0;
+
+connect_fail:
+ ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl,
+ dl_chan_params->teth_prot);
+alloc_dl_chan_fail:
+ if (connect_params->teth_prot != IPA_USB_DIAG)
+ ipa3_usb_release_xdci_channel(ul_out_params->clnt_hdl,
+ ul_chan_params->teth_prot);
+bad_params:
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_connect);
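+
+/*
+ * Illustrative usage (hypothetical caller, e.g. a USB function driver
+ * that has already initialized its tethering protocol; variable names
+ * are assumptions):
+ *
+ *	struct ipa_req_chan_out_params ul_out, dl_out;
+ *	int ret;
+ *
+ *	ret = ipa_usb_xdci_connect(&ul_chan, &dl_chan, &ul_out,
+ *		&dl_out, &conn);
+ *	if (ret)
+ *		return ret;
+ *
+ * ul_out.clnt_hdl and dl_out.clnt_hdl identify the channels for the
+ * later suspend/resume/disconnect calls. For IPA_USB_DIAG (DPL) only
+ * the DL parameters are used, so ul_chan_params and ul_out_params may
+ * be NULL, as validated at the top of this function.
+ */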
+
+static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot)
+{
+ if (teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+ IPA_USB_ERR("bad parameter.\n");
+ return -EFAULT;
+ }
+
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_CONNECTED) {
+ IPA_USB_ERR("%s is not connected.\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ int result = 0;
+ struct ipa_ep_cfg_holb holb_cfg;
+ unsigned long flags;
+ enum ipa3_usb_state orig_state;
+ enum ipa3_usb_transport_type ttype;
+
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+ IPA_USB_DBG_LOW("entry\n");
+ if (ipa3_usb_check_disconnect_prot(teth_prot)) {
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_DISCONNECT, ttype)) {
+ IPA_USB_ERR("Illegal operation.\n");
+ result = -EPERM;
+ goto bad_params;
+ }
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ if (ipa3_usb_ctx->ttype_ctx[ttype].state != IPA_USB_SUSPENDED) {
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ /* Stop DL/DPL channel */
+ result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
+ if (result) {
+ IPA_USB_ERR("failed to disconnect DL/DPL channel.\n");
+ goto bad_params;
+ }
+ } else {
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.en = IPA_HOLB_TMR_EN;
+ holb_cfg.tmr_val = 0;
+ ipa3_cfg_ep_holb(dl_clnt_hdl, &holb_cfg);
+ }
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
+ orig_state != IPA_USB_SUSPENDED) {
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
+ flags);
+ /* Stop UL channel */
+ result = ipa3_xdci_disconnect(ul_clnt_hdl,
+ true,
+ ipa3_usb_ctx->qmi_req_id);
+ if (result) {
+ IPA_USB_ERR("failed disconnect UL channel\n");
+ goto bad_params;
+ }
+ ipa3_usb_ctx->qmi_req_id++;
+ } else
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
+ flags);
+ } else
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+ /* Reset DL channel */
+ result = ipa3_reset_gsi_channel(dl_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset DL channel.\n");
+ goto bad_params;
+ }
+
+ /* Reset DL event ring */
+ result = ipa3_reset_gsi_event_ring(dl_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset DL event ring.\n");
+ goto bad_params;
+ }
+
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ /* Reset UL channel */
+ result = ipa3_reset_gsi_channel(ul_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset UL channel.\n");
+ goto bad_params;
+ }
+
+ /* Reset UL event ring */
+ result = ipa3_reset_gsi_event_ring(ul_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset UL event ring.\n");
+ goto bad_params;
+ }
+ }
+
+ /* Change state to STOPPED */
+ if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype))
+ IPA_USB_ERR("failed to change state to stopped\n");
+
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype);
+ if (result) {
+ IPA_USB_ERR("failed to release UL channel.\n");
+ goto bad_params;
+ }
+ }
+
+ result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype);
+ if (result) {
+ IPA_USB_ERR("failed to release DL channel.\n");
+ goto bad_params;
+ }
+
+ /* Disconnect tethering protocol */
+ result = ipa3_usb_disconnect_teth_prot(teth_prot);
+ if (result)
+ goto bad_params;
+
+ if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
+ orig_state != IPA_USB_SUSPENDED) {
+ result = ipa3_usb_release_prod(ttype);
+ if (result) {
+ IPA_USB_ERR("failed to release PROD.\n");
+ goto bad_params;
+ }
+ }
+
+ IPA_USB_DBG_LOW("exit\n");
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return 0;
+
+bad_params:
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return result;
+
+}
+EXPORT_SYMBOL(ipa_usb_xdci_disconnect);
+
+int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+ int result = -EFAULT;
+ enum ipa3_usb_transport_type ttype;
+
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+ IPA_USB_DBG_LOW("entry\n");
+ if (teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+ IPA_USB_ERR("bad parameters.\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_DEINIT_TETH_PROT, ttype)) {
+ IPA_USB_ERR("Illegal operation.\n");
+ result = -EPERM;
+ goto bad_params;
+ }
+
+ /* Clean-up tethering protocol */
+ switch (teth_prot) {
+ case IPA_USB_RNDIS:
+ case IPA_USB_ECM:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_INITIALIZED) {
+ IPA_USB_ERR("%s is not initialized\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ result = -EINVAL;
+ goto bad_params;
+ }
+ if (teth_prot == IPA_USB_RNDIS)
+ rndis_ipa_cleanup(
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.rndis.private);
+ else
+ ecm_ipa_cleanup(
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+ teth_prot_params.ecm.private);
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = NULL;
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INVALID;
+ ipa3_usb_ctx->num_init_prot--;
+ IPA_USB_DBG("deinitialized %s\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ break;
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_INITIALIZED) {
+ IPA_USB_ERR("%s (%s) is not initialized\n",
+ ipa3_usb_teth_prot_to_string(teth_prot),
+ ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data =
+ NULL;
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INVALID;
+ ipa3_usb_ctx->num_init_prot--;
+ IPA_USB_DBG("deinitialized %s (%s)\n",
+ ipa3_usb_teth_prot_to_string(teth_prot),
+ ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+ break;
+ case IPA_USB_DIAG:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_INITIALIZED) {
+ IPA_USB_ERR("%s is not initialized\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ result = -EINVAL;
+ goto bad_params;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data =
+ NULL;
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INVALID;
+ IPA_USB_DBG("deinitialized %s\n",
+ ipa3_usb_teth_prot_to_string(teth_prot));
+ break;
+ default:
+ IPA_USB_ERR("unexpected tethering protocol\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ if (IPA3_USB_IS_TTYPE_DPL(ttype) ||
+ (ipa3_usb_ctx->num_init_prot == 0)) {
+ if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
+ IPA_USB_ERR("failed to change state to invalid\n");
+ ipa_rm_delete_resource(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false;
+ ipa_rm_delete_resource(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false;
+ ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
+ }
+
+ IPA_USB_DBG_LOW("exit\n");
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return 0;
+
+bad_params:
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return result;
+}
+EXPORT_SYMBOL(ipa_usb_deinit_teth_prot);
+
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ int result = 0;
+ unsigned long flags;
+ enum ipa3_usb_cons_state curr_cons_state;
+ enum ipa3_usb_transport_type ttype;
+
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+ IPA_USB_DBG_LOW("entry\n");
+ if (teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+ IPA_USB_ERR("bad parameters.\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_SUSPEND, ttype)) {
+ IPA_USB_ERR("Illegal operation.\n");
+ result = -EPERM;
+ goto bad_params;
+ }
+
+ IPA_USB_DBG("Start suspend sequence: %s\n",
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ "DPL channel":"Data Tethering channels");
+
+ /* Change state to SUSPEND_REQUESTED */
+ if (!ipa3_usb_set_state(IPA_USB_SUSPEND_REQUESTED, false, ttype)) {
+ IPA_USB_ERR(
+ "fail changing state to suspend_req.\n");
+ result = -EFAULT;
+ goto bad_params;
+ }
+
+ /* Stop UL channel & suspend DL/DPL EP */
+ result = ipa3_xdci_suspend(ul_clnt_hdl, dl_clnt_hdl,
+ true,
+ ipa3_usb_ctx->qmi_req_id, IPA3_USB_IS_TTYPE_DPL(ttype));
+ if (result) {
+ IPA_USB_ERR("failed to suspend\n");
+ goto suspend_fail;
+ }
+ ipa3_usb_ctx->qmi_req_id++;
+
+ result = ipa3_usb_release_prod(ttype);
+ if (result) {
+ IPA_USB_ERR("failed to release PROD\n");
+ goto release_prod_fail;
+ }
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ curr_cons_state = ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state;
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ if (curr_cons_state == IPA_USB_CONS_GRANTED) {
+ /* Change state to SUSPEND_IN_PROGRESS */
+ if (!ipa3_usb_set_state(IPA_USB_SUSPEND_IN_PROGRESS,
+ false, ttype))
+ IPA_USB_ERR("fail set state to suspend_in_progress\n");
+
+ /* Check if DL/DPL data pending */
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+ IPA_USB_DBG(
+ "DL/DPL data pending, invoke remote wakeup\n");
+ queue_work(ipa3_usb_ctx->wq,
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ &ipa3_usb_dpl_notify_remote_wakeup_work :
+ &ipa3_usb_notify_remote_wakeup_work);
+ }
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+ ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ttype =
+ ttype;
+ ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.dl_clnt_hdl =
+ dl_clnt_hdl;
+ ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ul_clnt_hdl =
+ ul_clnt_hdl;
+ INIT_WORK(&ipa3_usb_ctx->ttype_ctx[ttype].
+ finish_suspend_work.work,
+ ipa3_usb_wq_finish_suspend_work);
+
+ result = -EINPROGRESS;
+ IPA_USB_DBG("exit with suspend_in_progress\n");
+ goto bad_params;
+ }
+
+ /* Stop DL channel */
+ result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+ if (result) {
+ IPAERR("Error stopping DL/DPL channel: %d\n", result);
+ result = -EFAULT;
+ goto release_prod_fail;
+ }
+ /* Change state to SUSPENDED */
+ if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
+ IPA_USB_ERR("failed to change state to suspended\n");
+
+ /* Check if DL/DPL data pending */
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+ IPA_USB_DBG_LOW(
+ "DL/DPL data is pending, invoking remote wakeup\n");
+ queue_work(ipa3_usb_ctx->wq, IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ &ipa3_usb_dpl_notify_remote_wakeup_work :
+ &ipa3_usb_notify_remote_wakeup_work);
+ }
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+ IPA_USB_DBG_LOW("exit\n");
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return 0;
+
+release_prod_fail:
+ ipa3_xdci_resume(ul_clnt_hdl, dl_clnt_hdl,
+ IPA3_USB_IS_TTYPE_DPL(ttype));
+suspend_fail:
+ /* Change state back to CONNECTED */
+ if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true, ttype))
+ IPA_USB_ERR("failed to change state back to connected\n");
+bad_params:
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_suspend);
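+
+/*
+ * Note on the -EINPROGRESS path above: when the consumer is still
+ * granted, the suspend completes asynchronously via
+ * finish_suspend_work once the consumer is released. A hypothetical
+ * caller sketch:
+ *
+ *	ret = ipa_usb_xdci_suspend(ul_hdl, dl_hdl, teth_prot);
+ *	if (ret == -EINPROGRESS)
+ *		ret = 0;	(suspend finishes in the background)
+ *	else if (ret)
+ *		goto fail;	(state was rolled back to CONNECTED)
+ */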
+
+int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ int result = -EFAULT;
+ enum ipa3_usb_state prev_state;
+ unsigned long flags;
+ enum ipa3_usb_transport_type ttype;
+
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+ IPA_USB_DBG_LOW("entry\n");
+
+ if (teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+ IPA_USB_ERR("bad parameters.\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_RESUME, ttype)) {
+ IPA_USB_ERR("Illegal operation.\n");
+ result = -EPERM;
+ goto bad_params;
+ }
+
+ IPA_USB_DBG_LOW("Start resume sequence: %s\n",
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ "DPL channel" : "Data Tethering channels");
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ prev_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+ /* Change state to RESUME_IN_PROGRESS */
+ if (!ipa3_usb_set_state(IPA_USB_RESUME_IN_PROGRESS, false, ttype)) {
+ IPA_USB_ERR("failed to change state to resume_in_progress\n");
+ result = -EFAULT;
+ goto bad_params;
+ }
+
+ /* Request USB_PROD */
+ result = ipa3_usb_request_prod(ttype);
+ if (result)
+ goto prod_req_fail;
+
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ /* Start UL channel */
+ result = ipa3_start_gsi_channel(ul_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to start UL channel.\n");
+ goto start_ul_fail;
+ }
+ }
+
+ if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
+ /* Start DL/DPL channel */
+ result = ipa3_start_gsi_channel(dl_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to start DL/DPL channel.\n");
+ goto start_dl_fail;
+ }
+ }
+
+ /* Change state to CONNECTED */
+ if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
+ IPA_USB_ERR("failed to change state to connected\n");
+ result = -EFAULT;
+ goto state_change_connected_fail;
+ }
+
+ IPA_USB_DBG_LOW("exit\n");
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return 0;
+
+state_change_connected_fail:
+ if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
+ result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+ if (result)
+ IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
+ result);
+ }
+start_dl_fail:
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ result = ipa3_stop_gsi_channel(ul_clnt_hdl);
+ if (result)
+ IPA_USB_ERR("Error stopping UL channel: %d\n", result);
+ }
+start_ul_fail:
+ ipa3_usb_release_prod(ttype);
+prod_req_fail:
+ /* Change state back to prev_state */
+ if (!ipa3_usb_set_state(prev_state, true, ttype))
+ IPA_USB_ERR("failed to change state back to %s\n",
+ ipa3_usb_state_to_string(prev_state));
+bad_params:
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_resume);
+
+static int __init ipa3_usb_init(void)
+{
+ int i;
+ unsigned long flags;
+ int res;
+
+ IPA_USB_DBG("entry\n");
+ ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
+ if (ipa3_usb_ctx == NULL) {
+ IPA_USB_ERR("failed to allocate memory, ipa_usb init failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+ ipa3_usb_ctx->teth_prot_ctx[i].state =
+ IPA_USB_TETH_PROT_INVALID;
+ ipa3_usb_ctx->num_init_prot = 0;
+ init_completion(&ipa3_usb_ctx->dev_ready_comp);
+ ipa3_usb_ctx->qmi_req_id = 0;
+ spin_lock_init(&ipa3_usb_ctx->state_lock);
+ ipa3_usb_ctx->dl_data_pending = false;
+ mutex_init(&ipa3_usb_ctx->general_mutex);
+
+ for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
+ ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false;
+ ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_valid = false;
+ init_completion(&ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_comp);
+ ipa3_usb_ctx->ttype_ctx[i].user_data = NULL;
+ }
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
+ ipa3_usb_ctx->ttype_ctx[i].state = IPA_USB_INVALID;
+ ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_state =
+ IPA_USB_CONS_RELEASED;
+ }
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+ ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq");
+ if (!ipa3_usb_ctx->wq) {
+ IPA_USB_ERR("failed to create workqueue\n");
+ res = -ENOMEM;
+ goto ipa_usb_workqueue_fail;
+ }
+
+ ipa_usb_debugfs_init();
+
+ IPA_USB_INFO("exit: IPA_USB init success!\n");
+
+ return 0;
+
+ipa_usb_workqueue_fail:
+ IPA_USB_ERR(":init failed (%d)\n", -res);
+ kfree(ipa3_usb_ctx);
+ return res;
+}
+
+static void ipa3_usb_exit(void)
+{
+ IPA_USB_DBG_LOW("IPA_USB exit\n");
+ ipa_usb_debugfs_remove();
+ kfree(ipa3_usb_ctx);
+}
+
+arch_initcall(ipa3_usb_init);
+module_exit(ipa3_usb_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA USB client driver");
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
new file mode 100644
index 0000000..79da63e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -0,0 +1,1251 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/ipa.h>
+#include <linux/cdev.h>
+#include <linux/ipa_odu_bridge.h>
+#include "../ipa_common_i.h"
+
+#define ODU_BRIDGE_DRV_NAME "odu_ipa_bridge"
+
+#define ODU_BRIDGE_DBG(fmt, args...) \
+ do { \
+ pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+#define ODU_BRIDGE_DBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+#define ODU_BRIDGE_ERR(fmt, args...) \
+ do { \
+ pr_err(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define ODU_BRIDGE_FUNC_ENTRY() \
+ ODU_BRIDGE_DBG_LOW("ENTRY\n")
+#define ODU_BRIDGE_FUNC_EXIT() \
+ ODU_BRIDGE_DBG_LOW("EXIT\n")
+
+
+#define ODU_BRIDGE_IS_QMI_ADDR(daddr) \
+ (memcmp(&(daddr), &odu_bridge_ctx->llv6_addr, sizeof((daddr))) \
+ == 0)
+
+#define ODU_BRIDGE_IPV4_HDR_NAME "odu_br_ipv4"
+#define ODU_BRIDGE_IPV6_HDR_NAME "odu_br_ipv6"
+
+#define IPA_ODU_SYS_DESC_FIFO_SZ 0x800
+
+#ifdef CONFIG_COMPAT
+#define ODU_BRIDGE_IOC_SET_LLV6_ADDR32 _IOW(ODU_BRIDGE_IOC_MAGIC, \
+ ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \
+ compat_uptr_t)
+#endif
+
+#define IPA_ODU_VER_CHECK() \
+ do { \
+ ret = 0;\
+ if (ipa_get_hw_type() == IPA_HW_None) { \
+ pr_err("IPA HW is unknown\n"); \
+ ret = -EFAULT; \
+ } \
+ else if (ipa_get_hw_type() < IPA_HW_v3_0) \
+ ret = 1; \
+ } while (0)
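+
+/*
+ * IPA_ODU_VER_CHECK() assigns to a local variable named 'ret' in the
+ * calling scope, e.g.:
+ *
+ *	int ret;
+ *
+ *	IPA_ODU_VER_CHECK();
+ *	if (ret < 0)
+ *		pr_err("fail to get IPA HW type\n");
+ *	else if (ret > 0)
+ *		pr_err("IPA HW version is lower than 3.0\n");
+ */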
+
+/**
+ * struct stats - driver statistics, viewable using debugfs
+ * @num_ul_packets: number of packets bridged in uplink direction
+ * @num_dl_packets: number of packets bridged in downlink direction
+ * @num_lan_packets: number of packets bridged to APPS in bridge mode
+ */
+struct stats {
+ u64 num_ul_packets;
+ u64 num_dl_packets;
+ u64 num_lan_packets;
+};
+
+/**
+ * struct odu_bridge_ctx - ODU bridge driver context information
+ * @class: kernel class pointer
+ * @dev_num: kernel device number
+ * @dev: kernel device struct pointer
+ * @cdev: kernel character device struct
+ * @netdev_name: network interface name
+ * @device_ethaddr: network interface ethernet address
+ * @priv: client's private data, to be used in the client's callbacks
+ * @tx_dp_notify: client callback for handling IPA ODU_PROD callback
+ * @send_dl_skb: client callback for sending skb in downlink direction
+ * @stats: statistics, how many packets were transmitted using the SW bridge
+ * @is_conencted: is bridge connected ?
+ * @mode: ODU mode (router/bridge)
+ * @lock: for the initialization, connect and disconnect synchronization
+ * @llv6_addr: link local IPv6 address of ODU network interface
+ * @odu_br_ipv4_hdr_hdl: handle for partial ipv4 ethernet header
+ * @odu_br_ipv6_hdr_hdl: handle for partial ipv6 ethernet header
+ * @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe
+ * @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe
+ * @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe
+ */
+struct odu_bridge_ctx {
+ struct class *class;
+ dev_t dev_num;
+ struct device *dev;
+ struct cdev cdev;
+ char netdev_name[IPA_RESOURCE_NAME_MAX];
+ u8 device_ethaddr[ETH_ALEN];
+ void *priv;
+ ipa_notify_cb tx_dp_notify;
+ int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+ struct stats stats;
+ bool is_connected;
+ enum odu_bridge_mode mode;
+ struct mutex lock;
+ struct in6_addr llv6_addr;
+ uint32_t odu_br_ipv4_hdr_hdl;
+ uint32_t odu_br_ipv6_hdr_hdl;
+ u32 odu_prod_hdl;
+ u32 odu_emb_cons_hdl;
+ u32 odu_teth_cons_hdl;
+ u32 ipa_sys_desc_size;
+ void *logbuf;
+ void *logbuf_low;
+};
+static struct odu_bridge_ctx *odu_bridge_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+#define ODU_MAX_MSG_LEN 512
+static char dbg_buff[ODU_MAX_MSG_LEN];
+#endif
+
+static void odu_bridge_emb_cons_cb(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ ODU_BRIDGE_FUNC_ENTRY();
+ if (evt != IPA_RECEIVE) {
+ ODU_BRIDGE_ERR("unexpected event\n");
+ WARN_ON(1);
+ return;
+ }
+ odu_bridge_ctx->send_dl_skb(priv, (struct sk_buff *)data);
+ odu_bridge_ctx->stats.num_dl_packets++;
+ ODU_BRIDGE_FUNC_EXIT();
+}
+
+static void odu_bridge_teth_cons_cb(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct ipv6hdr *ipv6hdr;
+ struct sk_buff *skb = (struct sk_buff *)data;
+ struct sk_buff *skb_copied;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+ if (evt != IPA_RECEIVE) {
+ ODU_BRIDGE_ERR("unexpected event\n");
+ WARN_ON(1);
+ return;
+ }
+
+ ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ if (ipv6hdr->version == 6 &&
+ ipv6_addr_is_multicast(&ipv6hdr->daddr)) {
+ ODU_BRIDGE_DBG_LOW("Multicast pkt, send to APPS and adapter\n");
+ skb_copied = skb_clone(skb, GFP_KERNEL);
+ if (skb_copied) {
+ odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+ IPA_RECEIVE,
+ (unsigned long) skb_copied);
+ odu_bridge_ctx->stats.num_lan_packets++;
+ } else {
+ ODU_BRIDGE_ERR("No memory\n");
+ }
+ }
+
+ odu_bridge_ctx->send_dl_skb(priv, skb);
+ odu_bridge_ctx->stats.num_dl_packets++;
+ ODU_BRIDGE_FUNC_EXIT();
+}
+
+static int odu_bridge_connect_router(void)
+{
+ struct ipa_sys_connect_params odu_prod_params;
+ struct ipa_sys_connect_params odu_emb_cons_params;
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ memset(&odu_prod_params, 0, sizeof(odu_prod_params));
+ memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
+
+ /* configure RX (ODU->IPA) EP */
+ odu_prod_params.client = IPA_CLIENT_ODU_PROD;
+ odu_prod_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+ odu_prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+ odu_prod_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size;
+ odu_prod_params.priv = odu_bridge_ctx->priv;
+ odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify;
+ res = ipa_setup_sys_pipe(&odu_prod_params,
+ &odu_bridge_ctx->odu_prod_hdl);
+ if (res) {
+ ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res);
+ goto fail_odu_prod;
+ }
+
+ /* configure TX (IPA->ODU) EP */
+ odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+ odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+ odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+ odu_emb_cons_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size;
+ odu_emb_cons_params.priv = odu_bridge_ctx->priv;
+ odu_emb_cons_params.notify = odu_bridge_emb_cons_cb;
+ res = ipa_setup_sys_pipe(&odu_emb_cons_params,
+ &odu_bridge_ctx->odu_emb_cons_hdl);
+ if (res) {
+ ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res);
+ goto fail_odu_emb_cons;
+ }
+
+ ODU_BRIDGE_DBG("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n",
+ odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl);
+
+ ODU_BRIDGE_FUNC_EXIT();
+
+ return 0;
+
+fail_odu_emb_cons:
+ ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+ odu_bridge_ctx->odu_prod_hdl = 0;
+fail_odu_prod:
+ return res;
+}
+
+static int odu_bridge_connect_bridge(void)
+{
+ struct ipa_sys_connect_params odu_prod_params;
+ struct ipa_sys_connect_params odu_emb_cons_params;
+ struct ipa_sys_connect_params odu_teth_cons_params;
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ memset(&odu_prod_params, 0, sizeof(odu_prod_params));
+ memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
+ memset(&odu_teth_cons_params, 0, sizeof(odu_teth_cons_params));
+
+ /* Build IPA Resource manager dependency graph */
+ ODU_BRIDGE_DBG_LOW("build dependency graph\n");
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res && res != -EINPROGRESS) {
+ ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
+ goto fail_add_dependency_1;
+ }
+
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+ if (res && res != -EINPROGRESS) {
+ ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
+ goto fail_add_dependency_2;
+ }
+
+ /* configure RX (ODU->IPA) EP */
+ odu_prod_params.client = IPA_CLIENT_ODU_PROD;
+ odu_prod_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
+ odu_prod_params.priv = odu_bridge_ctx->priv;
+ odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify;
+ odu_prod_params.skip_ep_cfg = true;
+ res = ipa_setup_sys_pipe(&odu_prod_params,
+ &odu_bridge_ctx->odu_prod_hdl);
+ if (res) {
+ ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res);
+ goto fail_odu_prod;
+ }
+
+ /* configure TX tethered (IPA->ODU) EP */
+ odu_teth_cons_params.client = IPA_CLIENT_ODU_TETH_CONS;
+ odu_teth_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
+ odu_teth_cons_params.priv = odu_bridge_ctx->priv;
+ odu_teth_cons_params.notify = odu_bridge_teth_cons_cb;
+ odu_teth_cons_params.skip_ep_cfg = true;
+ res = ipa_setup_sys_pipe(&odu_teth_cons_params,
+ &odu_bridge_ctx->odu_teth_cons_hdl);
+ if (res) {
+ ODU_BRIDGE_ERR("fail to setup sys pipe ODU_TETH_CONS %d\n",
+ res);
+ goto fail_odu_teth_cons;
+ }
+
+ /* configure TX embedded (IPA->ODU) EP */
+ odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+ odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+ odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+ odu_emb_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
+ odu_emb_cons_params.priv = odu_bridge_ctx->priv;
+ odu_emb_cons_params.notify = odu_bridge_emb_cons_cb;
+ res = ipa_setup_sys_pipe(&odu_emb_cons_params,
+ &odu_bridge_ctx->odu_emb_cons_hdl);
+ if (res) {
+ ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res);
+ goto fail_odu_emb_cons;
+ }
+
+ ODU_BRIDGE_DBG_LOW("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n",
+ odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl);
+ ODU_BRIDGE_DBG_LOW("odu_teth_cons_hdl = %d\n",
+ odu_bridge_ctx->odu_teth_cons_hdl);
+
+ ODU_BRIDGE_FUNC_EXIT();
+
+ return 0;
+
+fail_odu_emb_cons:
+ ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl);
+ odu_bridge_ctx->odu_teth_cons_hdl = 0;
+fail_odu_teth_cons:
+ ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+ odu_bridge_ctx->odu_prod_hdl = 0;
+fail_odu_prod:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+fail_add_dependency_2:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+fail_add_dependency_1:
+ return res;
+}
+
+static int odu_bridge_disconnect_router(void)
+{
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+ if (res)
+ ODU_BRIDGE_ERR("teardown ODU PROD failed\n");
+ odu_bridge_ctx->odu_prod_hdl = 0;
+
+ res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl);
+ if (res)
+ ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n");
+ odu_bridge_ctx->odu_emb_cons_hdl = 0;
+
+ ODU_BRIDGE_FUNC_EXIT();
+
+ return 0;
+}
+
+static int odu_bridge_disconnect_bridge(void)
+{
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+ if (res)
+ ODU_BRIDGE_ERR("teardown ODU PROD failed\n");
+ odu_bridge_ctx->odu_prod_hdl = 0;
+
+ res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl);
+ if (res)
+ ODU_BRIDGE_ERR("teardown ODU TETH CONS failed\n");
+ odu_bridge_ctx->odu_teth_cons_hdl = 0;
+
+ res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl);
+ if (res)
+ ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n");
+ odu_bridge_ctx->odu_emb_cons_hdl = 0;
+
+ /* Delete IPA Resource manager dependency graph */
+ ODU_BRIDGE_DBG("deleting dependency graph\n");
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res && res != -EINPROGRESS)
+ ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
+
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+ if (res && res != -EINPROGRESS)
+ ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
+
+ return 0;
+}
+
+/**
+ * odu_bridge_disconnect() - Disconnect odu bridge
+ *
+ * Disconnects all pipes and deletes IPA RM dependencies in bridge mode
+ *
+ * Return codes: 0- success, error otherwise
+ */
+int odu_bridge_disconnect(void)
+{
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ if (!odu_bridge_ctx) {
+ ODU_BRIDGE_ERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (!odu_bridge_ctx->is_connected) {
+ ODU_BRIDGE_ERR("Not connected\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&odu_bridge_ctx->lock);
+ if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) {
+ res = odu_bridge_disconnect_router();
+ if (res) {
+ ODU_BRIDGE_ERR("disconnect_router failed %d\n", res);
+ goto out;
+ }
+ } else {
+ res = odu_bridge_disconnect_bridge();
+ if (res) {
+ ODU_BRIDGE_ERR("disconnect_bridge failed %d\n", res);
+ goto out;
+ }
+ }
+
+ odu_bridge_ctx->is_connected = false;
+ res = 0;
+out:
+ mutex_unlock(&odu_bridge_ctx->lock);
+ ODU_BRIDGE_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(odu_bridge_disconnect);
+
+/**
+ * odu_bridge_connect() - Connect odu bridge.
+ *
+ * Calls the mode-specific connect function to connect the IPA pipes
+ * and add the IPA RM dependencies.
+ *
+ * Return codes: 0: success
+ * -EFAULT: bridge is not initialized or is already
+ * connected
+ */
+int odu_bridge_connect(void)
+{
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ if (!odu_bridge_ctx) {
+ ODU_BRIDGE_ERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (odu_bridge_ctx->is_connected) {
+ ODU_BRIDGE_ERR("already connected\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&odu_bridge_ctx->lock);
+ if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) {
+ res = odu_bridge_connect_router();
+ if (res) {
+ ODU_BRIDGE_ERR("connect_router failed\n");
+ goto bail;
+ }
+ } else {
+ res = odu_bridge_connect_bridge();
+ if (res) {
+ ODU_BRIDGE_ERR("connect_bridge failed\n");
+ goto bail;
+ }
+ }
+
+ odu_bridge_ctx->is_connected = true;
+ res = 0;
+bail:
+ mutex_unlock(&odu_bridge_ctx->lock);
+ ODU_BRIDGE_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(odu_bridge_connect);
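+
+/*
+ * Illustrative call order for a hypothetical ODU network driver,
+ * assuming the init/cleanup entry points declared in
+ * <linux/ipa_odu_bridge.h>:
+ *
+ *	odu_bridge_init(&params);	(callbacks, netdev name, ethaddr)
+ *	odu_bridge_connect();		(set up pipes for the current mode)
+ *	... pass uplink skbs via odu_bridge_tx_dp() ...
+ *	odu_bridge_disconnect();
+ *	odu_bridge_cleanup();
+ */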
+
+/**
+ * odu_bridge_set_mode() - Set bridge mode to Router/Bridge
+ * @mode: mode to be set
+ */
+static int odu_bridge_set_mode(enum odu_bridge_mode mode)
+{
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ if (mode < 0 || mode >= ODU_BRIDGE_MODE_MAX) {
+ ODU_BRIDGE_ERR("Unsupported mode: %d\n", mode);
+ return -EFAULT;
+ }
+
+ ODU_BRIDGE_DBG_LOW("setting mode: %d\n", mode);
+ mutex_lock(&odu_bridge_ctx->lock);
+
+ if (odu_bridge_ctx->mode == mode) {
+ ODU_BRIDGE_DBG_LOW("same mode\n");
+ res = 0;
+ goto bail;
+ }
+
+ if (odu_bridge_ctx->is_connected) {
+ /* first disconnect the old configuration */
+ if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) {
+ res = odu_bridge_disconnect_router();
+ if (res) {
+ ODU_BRIDGE_ERR("disconnect_router failed\n");
+ goto bail;
+ }
+ } else {
+ res = odu_bridge_disconnect_bridge();
+ if (res) {
+ ODU_BRIDGE_ERR("disconnect_bridge failed\n");
+ goto bail;
+ }
+ }
+
+ /* connect the new configuration */
+ if (mode == ODU_BRIDGE_MODE_ROUTER) {
+ res = odu_bridge_connect_router();
+ if (res) {
+ ODU_BRIDGE_ERR("connect_router failed\n");
+ goto bail;
+ }
+ } else {
+ res = odu_bridge_connect_bridge();
+ if (res) {
+ ODU_BRIDGE_ERR("connect_bridge failed\n");
+ goto bail;
+ }
+ }
+ }
+ odu_bridge_ctx->mode = mode;
+ res = 0;
+bail:
+ mutex_unlock(&odu_bridge_ctx->lock);
+ ODU_BRIDGE_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * odu_bridge_set_llv6_addr() - Set link local ipv6 address
+ * @llv6_addr: odu network interface link local address
+ *
+ * This function sets the link local ipv6 address provided by IOCTL
+ */
+static int odu_bridge_set_llv6_addr(struct in6_addr *llv6_addr)
+{
+ struct in6_addr llv6_addr_host;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ llv6_addr_host.s6_addr32[0] = ntohl(llv6_addr->s6_addr32[0]);
+ llv6_addr_host.s6_addr32[1] = ntohl(llv6_addr->s6_addr32[1]);
+ llv6_addr_host.s6_addr32[2] = ntohl(llv6_addr->s6_addr32[2]);
+ llv6_addr_host.s6_addr32[3] = ntohl(llv6_addr->s6_addr32[3]);
+
+ memcpy(&odu_bridge_ctx->llv6_addr, &llv6_addr_host,
+ sizeof(odu_bridge_ctx->llv6_addr));
+ ODU_BRIDGE_DBG_LOW("LLV6 addr: %pI6c\n", &odu_bridge_ctx->llv6_addr);
+
+ ODU_BRIDGE_FUNC_EXIT();
+
+ return 0;
+}
+
+static long odu_bridge_ioctl(struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int res = 0;
+ struct in6_addr llv6_addr;
+
+ ODU_BRIDGE_DBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+ if ((_IOC_TYPE(cmd) != ODU_BRIDGE_IOC_MAGIC) ||
+ (_IOC_NR(cmd) >= ODU_BRIDGE_IOCTL_MAX)) {
+ ODU_BRIDGE_ERR("Invalid ioctl\n");
+ return -ENOIOCTLCMD;
+ }
+
+ switch (cmd) {
+ case ODU_BRIDGE_IOC_SET_MODE:
+ ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_MODE ioctl called\n");
+ res = odu_bridge_set_mode(arg);
+ if (res) {
+ ODU_BRIDGE_ERR("Error, res = %d\n", res);
+ break;
+ }
+ break;
+
+ case ODU_BRIDGE_IOC_SET_LLV6_ADDR:
+ ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_LLV6_ADDR ioctl called\n");
+ res = copy_from_user(&llv6_addr,
+ (struct in6_addr __user *)arg,
+ sizeof(llv6_addr));
+ if (res) {
+ ODU_BRIDGE_ERR("Error, res = %d\n", res);
+ res = -EFAULT;
+ break;
+ }
+
+ res = odu_bridge_set_llv6_addr(&llv6_addr);
+ if (res) {
+ ODU_BRIDGE_ERR("Error, res = %d\n", res);
+ break;
+ }
+ break;
+
+ default:
+ ODU_BRIDGE_ERR("Unknown ioctl: %d\n", cmd);
+ WARN_ON(1);
+ }
+
+ return res;
+}
+
+#ifdef CONFIG_COMPAT
+static long compat_odu_bridge_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case ODU_BRIDGE_IOC_SET_LLV6_ADDR32:
+ cmd = ODU_BRIDGE_IOC_SET_LLV6_ADDR;
+ break;
+ case ODU_BRIDGE_IOC_SET_MODE:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return odu_bridge_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+static struct dentry *dfile_mode;
+
+static ssize_t odu_debugfs_stats(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ ODU_MAX_MSG_LEN - nbytes,
+ "UL packets: %lld\n",
+ odu_bridge_ctx->stats.num_ul_packets);
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ ODU_MAX_MSG_LEN - nbytes,
+ "DL packets: %lld\n",
+ odu_bridge_ctx->stats.num_dl_packets);
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ ODU_MAX_MSG_LEN - nbytes,
+ "LAN packets: %lld\n",
+ odu_bridge_ctx->stats.num_lan_packets);
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t odu_debugfs_hw_bridge_mode_write(struct file *file,
+ const char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned long missing;
+ enum odu_bridge_mode mode;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ if (count > 0)
+ dbg_buff[count-1] = '\0';
+
+ if (strcmp(dbg_buff, "router") == 0) {
+ mode = ODU_BRIDGE_MODE_ROUTER;
+ } else if (strcmp(dbg_buff, "bridge") == 0) {
+ mode = ODU_BRIDGE_MODE_BRIDGE;
+ } else {
+ ODU_BRIDGE_ERR("Bad mode, got %s,\n"
+ "Use <router> or <bridge>.\n", dbg_buff);
+ return count;
+ }
+
+ odu_bridge_set_mode(mode);
+ return count;
+}
+
+static ssize_t odu_debugfs_hw_bridge_mode_read(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+
+ switch (odu_bridge_ctx->mode) {
+ case ODU_BRIDGE_MODE_ROUTER:
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ ODU_MAX_MSG_LEN - nbytes,
+ "router\n");
+ break;
+ case ODU_BRIDGE_MODE_BRIDGE:
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ ODU_MAX_MSG_LEN - nbytes,
+ "bridge\n");
+ break;
+ default:
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ ODU_MAX_MSG_LEN - nbytes,
+ "mode error\n");
+ break;
+
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+const struct file_operations odu_stats_ops = {
+ .read = odu_debugfs_stats,
+};
+
+const struct file_operations odu_hw_bridge_mode_ops = {
+ .read = odu_debugfs_hw_bridge_mode_read,
+ .write = odu_debugfs_hw_bridge_mode_write,
+};
+
+static void odu_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+
+ dent = debugfs_create_dir("odu_ipa_bridge", 0);
+ if (IS_ERR(dent)) {
+ ODU_BRIDGE_ERR("fail to create folder odu_ipa_bridge\n");
+ return;
+ }
+
+ dfile_stats =
+ debugfs_create_file("stats", read_only_mode, dent,
+ 0, &odu_stats_ops);
+ if (!dfile_stats || IS_ERR(dfile_stats)) {
+ ODU_BRIDGE_ERR("fail to create file stats\n");
+ goto fail;
+ }
+
+ dfile_mode =
+ debugfs_create_file("mode", read_write_mode,
+ dent, 0, &odu_hw_bridge_mode_ops);
+ if (!dfile_mode ||
+ IS_ERR(dfile_mode)) {
+ ODU_BRIDGE_ERR("fail to create file dfile_mode\n");
+ goto fail;
+ }
+
+ return;
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+static void odu_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(dent);
+}
+
+#else
+static void odu_debugfs_init(void) {}
+static void odu_debugfs_destroy(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+
+static const struct file_operations odu_bridge_drv_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = odu_bridge_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_odu_bridge_ioctl,
+#endif
+};
+
+/**
+ * odu_bridge_tx_dp() - Send skb to ODU bridge
+ * @skb: skb to send
+ * @metadata: metadata on packet
+ *
+ * This function handles an uplink packet.
+ * In Router Mode:
+ * the packet is sent directly to IPA.
+ * In Bridge Mode:
+ * the packet is classified to decide whether it should also reach the
+ * APPS network stack:
+ * a QMI IP packet is delivered to the APPS network stack;
+ * an IPv6 multicast packet is delivered to both the APPS network stack and Q6.
+ *
+ * Return codes: 0 on success, negative value otherwise
+ */
+int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
+{
+ struct sk_buff *skb_copied = NULL;
+ struct ipv6hdr *ipv6hdr;
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ switch (odu_bridge_ctx->mode) {
+ case ODU_BRIDGE_MODE_ROUTER:
+ /* Router mode - pass skb to IPA */
+ res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+ if (res) {
+ ODU_BRIDGE_DBG("tx dp failed %d\n", res);
+ goto out;
+ }
+ odu_bridge_ctx->stats.num_ul_packets++;
+ goto out;
+
+ case ODU_BRIDGE_MODE_BRIDGE:
+ ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ if (ipv6hdr->version == 6 &&
+ ODU_BRIDGE_IS_QMI_ADDR(ipv6hdr->daddr)) {
+ ODU_BRIDGE_DBG_LOW("QMI packet\n");
+ skb_copied = skb_clone(skb, GFP_KERNEL);
+ if (!skb_copied) {
+ ODU_BRIDGE_ERR("No memory\n");
+ return -ENOMEM;
+ }
+ odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+ IPA_RECEIVE,
+ (unsigned long)skb_copied);
+ odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+ IPA_WRITE_DONE,
+ (unsigned long)skb);
+ odu_bridge_ctx->stats.num_ul_packets++;
+ odu_bridge_ctx->stats.num_lan_packets++;
+ res = 0;
+ goto out;
+ }
+
+ if (ipv6hdr->version == 6 &&
+ ipv6_addr_is_multicast(&ipv6hdr->daddr)) {
+ ODU_BRIDGE_DBG_LOW(
+ "Multicast pkt, send to APPS and IPA\n");
+ skb_copied = skb_clone(skb, GFP_KERNEL);
+ if (!skb_copied) {
+ ODU_BRIDGE_ERR("No memory\n");
+ return -ENOMEM;
+ }
+
+ res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+ if (res) {
+ ODU_BRIDGE_DBG("tx dp failed %d\n", res);
+ dev_kfree_skb(skb_copied);
+ goto out;
+ }
+
+ odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+ IPA_RECEIVE,
+ (unsigned long)skb_copied);
+ odu_bridge_ctx->stats.num_ul_packets++;
+ odu_bridge_ctx->stats.num_lan_packets++;
+ goto out;
+ }
+
+ res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+ if (res) {
+ ODU_BRIDGE_DBG("tx dp failed %d\n", res);
+ goto out;
+ }
+ odu_bridge_ctx->stats.num_ul_packets++;
+ goto out;
+
+ default:
+ ODU_BRIDGE_ERR("Unsupported mode: %d\n", odu_bridge_ctx->mode);
+ WARN_ON(1);
+ res = -EFAULT;
+
+ }
+out:
+ ODU_BRIDGE_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(odu_bridge_tx_dp);
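+
+/*
+ * Illustrative caller sketch (not part of this driver): an ODU adapter
+ * driver forwarding an uplink skb through the bridge. Passing NULL
+ * metadata is an assumption for the simple case; the call and error
+ * handling follow the API above.
+ *
+ *	static int odu_adapter_xmit(struct sk_buff *skb)
+ *	{
+ *		int ret;
+ *
+ *		ret = odu_bridge_tx_dp(skb, NULL);
+ *		if (ret)
+ *			pr_err("odu_bridge_tx_dp failed %d\n", ret);
+ *		return ret;
+ *	}
+ */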
+
+static int odu_bridge_add_hdrs(void)
+{
+ struct ipa_ioc_add_hdr *hdrs;
+ struct ipa_hdr_add *ipv4_hdr;
+ struct ipa_hdr_add *ipv6_hdr;
+ struct ethhdr *eth_ipv4;
+ struct ethhdr *eth_ipv6;
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+ hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+ GFP_KERNEL);
+ if (!hdrs) {
+ ODU_BRIDGE_ERR("no mem\n");
+ res = -ENOMEM;
+ goto out;
+ }
+ ipv4_hdr = &hdrs->hdr[0];
+ eth_ipv4 = (struct ethhdr *)(ipv4_hdr->hdr);
+ ipv6_hdr = &hdrs->hdr[1];
+ eth_ipv6 = (struct ethhdr *)(ipv6_hdr->hdr);
+ strlcpy(ipv4_hdr->name, ODU_BRIDGE_IPV4_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ memcpy(eth_ipv4->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN);
+ eth_ipv4->h_proto = htons(ETH_P_IP);
+ ipv4_hdr->hdr_len = ETH_HLEN;
+ ipv4_hdr->is_partial = 1;
+ ipv4_hdr->is_eth2_ofst_valid = 1;
+ ipv4_hdr->eth2_ofst = 0;
+ strlcpy(ipv6_hdr->name, ODU_BRIDGE_IPV6_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ memcpy(eth_ipv6->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN);
+ eth_ipv6->h_proto = htons(ETH_P_IPV6);
+ ipv6_hdr->hdr_len = ETH_HLEN;
+ ipv6_hdr->is_partial = 1;
+ ipv6_hdr->is_eth2_ofst_valid = 1;
+ ipv6_hdr->eth2_ofst = 0;
+ hdrs->commit = 1;
+ hdrs->num_hdrs = 2;
+ res = ipa_add_hdr(hdrs);
+ if (res) {
+ ODU_BRIDGE_ERR("Fail on Header-Insertion(%d)\n", res);
+ goto out_free_mem;
+ }
+ if (ipv4_hdr->status) {
+ ODU_BRIDGE_ERR("Fail on Header-Insertion ipv4(%d)\n",
+ ipv4_hdr->status);
+ res = ipv4_hdr->status;
+ goto out_free_mem;
+ }
+ if (ipv6_hdr->status) {
+ ODU_BRIDGE_ERR("Fail on Header-Insertion ipv6(%d)\n",
+ ipv6_hdr->status);
+ res = ipv6_hdr->status;
+ goto out_free_mem;
+ }
+ odu_bridge_ctx->odu_br_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+ odu_bridge_ctx->odu_br_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+
+ res = 0;
+out_free_mem:
+ kfree(hdrs);
+out:
+ ODU_BRIDGE_FUNC_EXIT();
+ return res;
+}
+
+static void odu_bridge_del_hdrs(void)
+{
+ struct ipa_ioc_del_hdr *del_hdr;
+ struct ipa_hdr_del *ipv4;
+ struct ipa_hdr_del *ipv6;
+ int result;
+
+ del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+ sizeof(*ipv6), GFP_KERNEL);
+ if (!del_hdr)
+ return;
+ del_hdr->commit = 1;
+ del_hdr->num_hdls = 2;
+ ipv4 = &del_hdr->hdl[0];
+ ipv4->hdl = odu_bridge_ctx->odu_br_ipv4_hdr_hdl;
+ ipv6 = &del_hdr->hdl[1];
+ ipv6->hdl = odu_bridge_ctx->odu_br_ipv6_hdr_hdl;
+ result = ipa_del_hdr(del_hdr);
+ if (result || ipv4->status || ipv6->status)
+		ODU_BRIDGE_ERR("ipa_del_hdr failed\n");
+ kfree(del_hdr);
+}
+
+/**
+ * odu_bridge_register_properties() - set Tx/Rx properties for ipacm
+ *
+ * Register the network interface with Tx and Rx properties.
+ * Tx properties are for data flowing from IPA to the adapter; they
+ * carry Header-Insertion properties for both IPv4 and IPv6 Ethernet framing.
+ * Rx properties are for data flowing from the adapter to IPA; they carry
+ * a simple rule which always "hits".
+ *
+ */
+static int odu_bridge_register_properties(void)
+{
+ struct ipa_tx_intf tx_properties = {0};
+ struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+ struct ipa_ioc_tx_intf_prop *ipv4_property;
+ struct ipa_ioc_tx_intf_prop *ipv6_property;
+ struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+ struct ipa_rx_intf rx_properties = {0};
+ struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+ struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+ int res = 0;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ tx_properties.prop = properties;
+ ipv4_property = &tx_properties.prop[0];
+ ipv4_property->ip = IPA_IP_v4;
+ ipv4_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+ ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+ strlcpy(ipv4_property->hdr_name, ODU_BRIDGE_IPV4_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ ipv6_property = &tx_properties.prop[1];
+ ipv6_property->ip = IPA_IP_v6;
+ ipv6_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+ ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+ strlcpy(ipv6_property->hdr_name, ODU_BRIDGE_IPV6_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ tx_properties.num_props = 2;
+
+ rx_properties.prop = rx_ioc_properties;
+ rx_ipv4_property = &rx_properties.prop[0];
+ rx_ipv4_property->ip = IPA_IP_v4;
+ rx_ipv4_property->attrib.attrib_mask = 0;
+ rx_ipv4_property->src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+ rx_ipv6_property = &rx_properties.prop[1];
+ rx_ipv6_property->ip = IPA_IP_v6;
+ rx_ipv6_property->attrib.attrib_mask = 0;
+ rx_ipv6_property->src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+ rx_properties.num_props = 2;
+
+ res = ipa_register_intf(odu_bridge_ctx->netdev_name, &tx_properties,
+ &rx_properties);
+ if (res) {
+ ODU_BRIDGE_ERR("fail on Tx/Rx properties registration %d\n",
+ res);
+ }
+
+ ODU_BRIDGE_FUNC_EXIT();
+
+ return res;
+}
+
+static void odu_bridge_deregister_properties(void)
+{
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+ res = ipa_deregister_intf(odu_bridge_ctx->netdev_name);
+ if (res)
+ ODU_BRIDGE_ERR("Fail on Tx prop deregister %d\n", res);
+ ODU_BRIDGE_FUNC_EXIT();
+}
+
+/**
+ * odu_bridge_init() - Initialize the ODU bridge driver
+ * @params: initialization parameters
+ *
+ * This function initializes all bridge internal data and registers the ODU
+ * bridge with the kernel for IOCTL and debugfs access.
+ * Header insertion entries and interface properties are registered with the
+ * IPA driver.
+ *
+ * Return codes: 0: success,
+ * -EINVAL - Bad parameter
+ * Other negative value - Failure
+ */
+int odu_bridge_init(struct odu_bridge_params *params)
+{
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ if (!params) {
+ ODU_BRIDGE_ERR("null pointer params\n");
+ return -EINVAL;
+ }
+ if (!params->netdev_name) {
+ ODU_BRIDGE_ERR("null pointer params->netdev_name\n");
+ return -EINVAL;
+ }
+ if (!params->tx_dp_notify) {
+ ODU_BRIDGE_ERR("null pointer params->tx_dp_notify\n");
+ return -EINVAL;
+ }
+ if (!params->send_dl_skb) {
+ ODU_BRIDGE_ERR("null pointer params->send_dl_skb\n");
+ return -EINVAL;
+ }
+ if (odu_bridge_ctx) {
+ ODU_BRIDGE_ERR("Already initialized\n");
+ return -EFAULT;
+ }
+ if (!ipa_is_ready()) {
+ ODU_BRIDGE_ERR("IPA is not ready\n");
+ return -EFAULT;
+ }
+
+ ODU_BRIDGE_DBG("device_ethaddr=%pM\n", params->device_ethaddr);
+
+ odu_bridge_ctx = kzalloc(sizeof(*odu_bridge_ctx), GFP_KERNEL);
+ if (!odu_bridge_ctx) {
+ ODU_BRIDGE_ERR("kzalloc err.\n");
+ return -ENOMEM;
+ }
+
+ odu_bridge_ctx->class = class_create(THIS_MODULE, ODU_BRIDGE_DRV_NAME);
+	if (IS_ERR(odu_bridge_ctx->class)) {
+		ODU_BRIDGE_ERR("class_create err.\n");
+ res = -ENODEV;
+ goto fail_class_create;
+ }
+
+ res = alloc_chrdev_region(&odu_bridge_ctx->dev_num, 0, 1,
+ ODU_BRIDGE_DRV_NAME);
+ if (res) {
+ ODU_BRIDGE_ERR("alloc_chrdev_region err.\n");
+ res = -ENODEV;
+ goto fail_alloc_chrdev_region;
+ }
+
+ odu_bridge_ctx->dev = device_create(odu_bridge_ctx->class, NULL,
+ odu_bridge_ctx->dev_num, odu_bridge_ctx, ODU_BRIDGE_DRV_NAME);
+ if (IS_ERR(odu_bridge_ctx->dev)) {
+		ODU_BRIDGE_ERR("device_create err.\n");
+ res = -ENODEV;
+ goto fail_device_create;
+ }
+
+ cdev_init(&odu_bridge_ctx->cdev, &odu_bridge_drv_fops);
+ odu_bridge_ctx->cdev.owner = THIS_MODULE;
+ odu_bridge_ctx->cdev.ops = &odu_bridge_drv_fops;
+
+ res = cdev_add(&odu_bridge_ctx->cdev, odu_bridge_ctx->dev_num, 1);
+ if (res) {
+		ODU_BRIDGE_ERR("cdev_add err=%d\n", -res);
+ res = -ENODEV;
+ goto fail_cdev_add;
+ }
+
+ odu_debugfs_init();
+
+ strlcpy(odu_bridge_ctx->netdev_name, params->netdev_name,
+ IPA_RESOURCE_NAME_MAX);
+ odu_bridge_ctx->priv = params->priv;
+ odu_bridge_ctx->tx_dp_notify = params->tx_dp_notify;
+ odu_bridge_ctx->send_dl_skb = params->send_dl_skb;
+ memcpy(odu_bridge_ctx->device_ethaddr, params->device_ethaddr,
+ ETH_ALEN);
+ odu_bridge_ctx->ipa_sys_desc_size = params->ipa_desc_size;
+ odu_bridge_ctx->mode = ODU_BRIDGE_MODE_ROUTER;
+
+ mutex_init(&odu_bridge_ctx->lock);
+
+ res = odu_bridge_add_hdrs();
+ if (res) {
+ ODU_BRIDGE_ERR("fail on odu_bridge_add_hdr %d\n", res);
+ goto fail_add_hdrs;
+ }
+
+ res = odu_bridge_register_properties();
+ if (res) {
+ ODU_BRIDGE_ERR("fail on register properties %d\n", res);
+ goto fail_register_properties;
+ }
+
+ ODU_BRIDGE_FUNC_EXIT();
+ return 0;
+
+fail_register_properties:
+ odu_bridge_del_hdrs();
+fail_add_hdrs:
+ odu_debugfs_destroy();
+fail_cdev_add:
+ device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num);
+fail_device_create:
+ unregister_chrdev_region(odu_bridge_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+ class_destroy(odu_bridge_ctx->class);
+fail_class_create:
+ kfree(odu_bridge_ctx);
+ odu_bridge_ctx = NULL;
+ return res;
+}
+EXPORT_SYMBOL(odu_bridge_init);
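+
+/*
+ * Illustrative init sketch: the field names follow the checks done in
+ * odu_bridge_init() above; the "odu0" name, the callback symbols and the
+ * descriptor size are assumptions for the example only.
+ *
+ *	struct odu_bridge_params params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.netdev_name = "odu0";
+ *	params.priv = adapter_ctx;
+ *	params.tx_dp_notify = adapter_tx_dp_notify;
+ *	params.send_dl_skb = adapter_send_dl_skb;
+ *	memcpy(params.device_ethaddr, dev_addr, ETH_ALEN);
+ *	params.ipa_desc_size = 2048;
+ *	res = odu_bridge_init(&params);
+ */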
+
+/**
+ * odu_bridge_cleanup() - De-Initialize the ODU bridge driver
+ *
+ * Return codes: 0: success,
+ * -EINVAL - Bad parameter
+ * Other negative value - Failure
+ */
+int odu_bridge_cleanup(void)
+{
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ if (!odu_bridge_ctx) {
+ ODU_BRIDGE_ERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("cannot deinit while bridge is connected\n");
+ return -EFAULT;
+ }
+
+ odu_bridge_deregister_properties();
+ odu_bridge_del_hdrs();
+ odu_debugfs_destroy();
+ cdev_del(&odu_bridge_ctx->cdev);
+ device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num);
+ unregister_chrdev_region(odu_bridge_ctx->dev_num, 1);
+ class_destroy(odu_bridge_ctx->class);
+ ipc_log_context_destroy(odu_bridge_ctx->logbuf);
+ ipc_log_context_destroy(odu_bridge_ctx->logbuf_low);
+ kfree(odu_bridge_ctx);
+ odu_bridge_ctx = NULL;
+
+ ODU_BRIDGE_FUNC_EXIT();
+ return 0;
+}
+EXPORT_SYMBOL(odu_bridge_cleanup);
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ODU bridge driver");
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
new file mode 100644
index 0000000..981129e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -0,0 +1,383 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#ifndef _IPA_COMMON_I_H_
+#define _IPA_COMMON_I_H_
+#include <linux/ipc_logging.h>
+#include <linux/ipa.h>
+#include <linux/ipa_uc_offload.h>
+
+#define __FILENAME__ \
+ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
+
+#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = EP; \
+ log_info.id_string = ipa_clients_strings[client]
+
+#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = SIMPLE; \
+ log_info.id_string = __func__
+
+#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = RESOURCE; \
+ log_info.id_string = resource_name
+
+#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = SPECIAL; \
+ log_info.id_string = id_str
+
+#define IPA_ACTIVE_CLIENTS_INC_EP(client) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+ ipa_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+ ipa_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+ ipa_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+ ipa_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+ ipa_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+ ipa_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+ ipa_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \
+ do { \
+ struct ipa_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+ ipa_dec_client_disable_clks(&log_info); \
+ } while (0)
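+
+/*
+ * Usage sketch for the macros above: bracket any code that needs the IPA
+ * clocks with a matched INC/DEC pair, so each vote is logged with the
+ * file, line and identifier it came from. The work in the middle is an
+ * illustrative assumption.
+ *
+ *	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ *	... access IPA registers or issue commands ...
+ *	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ */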
+
+#define ipa_assert_on(condition)\
+do {\
+ if (unlikely(condition))\
+ ipa_assert();\
+} while (0)
+
+#define IPA_CLIENT_IS_PROD(x) ((x) >= IPA_CLIENT_PROD && (x) < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) ((x) >= IPA_CLIENT_CONS && (x) < IPA_CLIENT_MAX)
+
+#define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000)
+#define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000)
+
+enum ipa_active_client_log_type {
+ EP,
+ SIMPLE,
+ RESOURCE,
+ SPECIAL,
+ INVALID
+};
+
+struct ipa_active_client_logging_info {
+ const char *id_string;
+ char *file;
+ int line;
+ enum ipa_active_client_log_type type;
+};
+
+/**
+ * struct ipa_mem_buffer - IPA memory buffer
+ * @base: base
+ * @phys_base: physical base address
+ * @size: size of memory buffer
+ */
+struct ipa_mem_buffer {
+ void *base;
+ dma_addr_t phys_base;
+ u32 size;
+};
+
+#define IPA_MHI_GSI_ER_START 10
+#define IPA_MHI_GSI_ER_END 16
+
+/**
+ * enum ipa3_mhi_burst_mode - MHI channel burst mode state
+ *
+ * Values are according to MHI specification
+ * @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels,
+ * disabled for SW channels
+ * @IPA_MHI_BURST_MODE_RESERVED:
+ * @IPA_MHI_BURST_MODE_DISABLE: Burst mode is disabled for this channel
+ * @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for this channel
+ *
+ */
+enum ipa3_mhi_burst_mode {
+ IPA_MHI_BURST_MODE_DEFAULT,
+ IPA_MHI_BURST_MODE_RESERVED,
+ IPA_MHI_BURST_MODE_DISABLE,
+ IPA_MHI_BURST_MODE_ENABLE,
+};
+
+/**
+ * enum ipa_hw_mhi_channel_states - MHI channel state machine
+ *
+ * Values are according to MHI specification
+ * @IPA_HW_MHI_CHANNEL_STATE_DISABLE: Channel is disabled and not processed by
+ * the host or device.
+ * @IPA_HW_MHI_CHANNEL_STATE_ENABLE: A channel is enabled after being
+ * initialized and configured by the host, including its channel context and
+ * associated transfer ring. While in this state, the channel is not active
+ * and the device does not process transfers.
+ * @IPA_HW_MHI_CHANNEL_STATE_RUN: The device processes transfers and doorbell
+ * for channels.
+ * @IPA_HW_MHI_CHANNEL_STATE_SUSPEND: Used to halt operations on the channel.
+ * The device does not process transfers for the channel in this state.
+ * This state is typically used to synchronize the transition to low power
+ * modes.
+ * @IPA_HW_MHI_CHANNEL_STATE_STOP: Used to halt operations on the channel.
+ * The device does not process transfers for the channel in this state.
+ * @IPA_HW_MHI_CHANNEL_STATE_ERROR: The device detected an error in an element
+ * from the transfer ring associated with the channel.
+ * @IPA_HW_MHI_CHANNEL_STATE_INVALID: Invalid state. Shall not be in use in
+ * operational scenario.
+ */
+enum ipa_hw_mhi_channel_states {
+ IPA_HW_MHI_CHANNEL_STATE_DISABLE = 0,
+ IPA_HW_MHI_CHANNEL_STATE_ENABLE = 1,
+ IPA_HW_MHI_CHANNEL_STATE_RUN = 2,
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND = 3,
+ IPA_HW_MHI_CHANNEL_STATE_STOP = 4,
+ IPA_HW_MHI_CHANNEL_STATE_ERROR = 5,
+ IPA_HW_MHI_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+ * command. Parameters are sent as 32b immediate parameters.
+ * @isDlUlSyncEnabled: Flag to indicate if DL UL Synchronization is enabled
+ * @UlAccmVal: UL Timer Accumulation value (Period after which device will poll
+ * for UL data)
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+union IpaHwMhiDlUlSyncCmdData_t {
+ struct IpaHwMhiDlUlSyncCmdParams_t {
+ u32 isDlUlSyncEnabled:8;
+ u32 UlAccmVal:8;
+ u32 ulMsiEventThreshold:8;
+ u32 dlMsiEventThreshold:8;
+ } params;
+ u32 raw32b;
+};
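+
+/*
+ * Packing sketch for the command above: fill the bit-fields through
+ * .params and hand the 32-bit immediate value to the uC via the API
+ * declared below. The threshold and timer values are illustrative
+ * assumptions.
+ *
+ *	union IpaHwMhiDlUlSyncCmdData_t cmd;
+ *
+ *	cmd.raw32b = 0;
+ *	cmd.params.isDlUlSyncEnabled = 1;
+ *	cmd.params.UlAccmVal = 10;
+ *	cmd.params.ulMsiEventThreshold = 8;
+ *	cmd.params.dlMsiEventThreshold = 8;
+ *	ipa_uc_mhi_send_dl_ul_sync_info(&cmd);
+ */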
+
+struct ipa_mhi_ch_ctx {
+ u8 chstate;/*0-7*/
+ u8 brstmode:2;/*8-9*/
+ u8 pollcfg:6;/*10-15*/
+ u16 rsvd;/*16-31*/
+ u32 chtype;
+ u32 erindex;
+ u64 rbase;
+ u64 rlen;
+ u64 rp;
+ u64 wp;
+} __packed;
+
+struct ipa_mhi_ev_ctx {
+ u32 intmodc:16;
+ u32 intmodt:16;
+ u32 ertype;
+ u32 msivec;
+ u64 rbase;
+ u64 rlen;
+ u64 rp;
+ u64 wp;
+} __packed;
+
+struct ipa_mhi_init_uc_engine {
+ struct ipa_mhi_msi_info *msi;
+ u32 mmio_addr;
+ u32 host_ctrl_addr;
+ u32 host_data_addr;
+ u32 first_ch_idx;
+ u32 first_er_idx;
+ union IpaHwMhiDlUlSyncCmdData_t *ipa_cached_dl_ul_sync_info;
+};
+
+struct ipa_mhi_init_gsi_engine {
+ u32 first_ch_idx;
+};
+
+struct ipa_mhi_init_engine {
+ struct ipa_mhi_init_uc_engine uC;
+ struct ipa_mhi_init_gsi_engine gsi;
+};
+
+struct start_gsi_channel {
+ enum ipa_hw_mhi_channel_states state;
+ struct ipa_mhi_msi_info *msi;
+ struct ipa_mhi_ev_ctx *ev_ctx_host;
+ u64 event_context_addr;
+ struct ipa_mhi_ch_ctx *ch_ctx_host;
+ u64 channel_context_addr;
+ void (*ch_err_cb)(struct gsi_chan_err_notify *notify);
+ void (*ev_err_cb)(struct gsi_evt_err_notify *notify);
+ void *channel;
+ bool assert_bit40;
+ struct gsi_mhi_channel_scratch *mhi;
+ unsigned long *cached_gsi_evt_ring_hdl;
+ uint8_t evchid;
+};
+
+struct start_uc_channel {
+ enum ipa_hw_mhi_channel_states state;
+ u8 index;
+ u8 id;
+};
+
+struct start_mhi_channel {
+ struct start_uc_channel uC;
+ struct start_gsi_channel gsi;
+};
+
+struct ipa_mhi_connect_params_internal {
+ struct ipa_sys_connect_params *sys;
+ u8 channel_id;
+ struct start_mhi_channel start;
+};
+
+/**
+ * struct ipa_hdr_offset_entry - IPA header offset entry
+ * @link: entry's link in global header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa_hdr_offset_entry {
+ struct list_head link;
+ u32 offset;
+ u32 bin;
+};
+
+extern const char *ipa_clients_strings[];
+
+#define IPA_IPC_LOGGING(buf, fmt, args...) \
+ do { \
+ if (buf) \
+ ipc_log_string((buf), fmt, __func__, __LINE__, \
+ ## args); \
+ } while (0)
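+
+/*
+ * Usage sketch: the macro above passes __func__ and __LINE__ as the first
+ * two variadic arguments, so the caller's format string should start with
+ * "%s:%d" placeholders. The message text is an illustrative assumption.
+ *
+ *	IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), "%s:%d ep %d suspended\n", ep);
+ */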
+
+void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+int ipa_inc_client_enable_clks_no_block(
+ struct ipa_active_client_logging_info *id);
+int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource);
+int ipa_resume_resource(enum ipa_rm_resource_name name);
+int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource);
+int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+ u32 bandwidth_mbps);
+void *ipa_get_ipc_logbuf(void);
+void *ipa_get_ipc_logbuf_low(void);
+void ipa_assert(void);
+
+/* MHI */
+int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params);
+int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl);
+int ipa_disconnect_mhi_pipe(u32 clnt_hdl);
+bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client);
+int ipa_qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+int ipa_qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+int ipa_generate_tag_process(void);
+int ipa_disable_sps_pipe(enum ipa_client_type client);
+int ipa_mhi_reset_channel_internal(enum ipa_client_type client);
+int ipa_mhi_start_channel_internal(enum ipa_client_type client);
+bool ipa_mhi_sps_channel_empty(enum ipa_client_type client);
+int ipa_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index);
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa_mhi_query_ch_info(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info);
+int ipa_mhi_destroy_channel(enum ipa_client_type client);
+int ipa_mhi_is_using_dma(bool *flag);
+const char *ipa_mhi_get_state_str(int state);
+
+/* MHI uC */
+int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa_uc_mhi_init(void (*ready_cb)(void),
+	void (*wakeup_request_cb)(void));
+void ipa_uc_mhi_cleanup(void);
+int ipa_uc_mhi_reset_channel(int channelHandle);
+int ipa_uc_mhi_suspend_channel(int channelHandle);
+int ipa_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa_uc_mhi_print_stats(char *dbg_buff, int size);
+
+/* uC */
+int ipa_uc_state_check(void);
+
+/* general */
+void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb);
+void ipa_set_tag_process_before_gating(bool val);
+bool ipa_has_open_aggr_frame(enum ipa_client_type client);
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+u8 *ipa_write_64(u64 w, u8 *dest);
+u8 *ipa_write_32(u32 w, u8 *dest);
+u8 *ipa_write_16(u16 hw, u8 *dest);
+u8 *ipa_write_8(u8 b, u8 *dest);
+u8 *ipa_pad_to_64(u8 *dest);
+u8 *ipa_pad_to_32(u8 *dest);
+const char *ipa_get_version_string(enum ipa_hw_type ver);
+
+#endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
new file mode 100644
index 0000000..e01bb7e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -0,0 +1,1194 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+#include "ipa_common_i.h"
+
+static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
+ __stringify(IPA_RM_RESOURCE_Q6_PROD),
+ __stringify(IPA_RM_RESOURCE_USB_PROD),
+ __stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
+ __stringify(IPA_RM_RESOURCE_HSIC_PROD),
+ __stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
+ __stringify(IPA_RM_RESOURCE_RNDIS_PROD),
+ __stringify(IPA_RM_RESOURCE_WWAN_0_PROD),
+ __stringify(IPA_RM_RESOURCE_WLAN_PROD),
+ __stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD),
+ __stringify(IPA_RM_RESOURCE_MHI_PROD),
+ __stringify(IPA_RM_RESOURCE_Q6_CONS),
+ __stringify(IPA_RM_RESOURCE_USB_CONS),
+ __stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
+ __stringify(IPA_RM_RESOURCE_HSIC_CONS),
+ __stringify(IPA_RM_RESOURCE_WLAN_CONS),
+ __stringify(IPA_RM_RESOURCE_APPS_CONS),
+ __stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS),
+ __stringify(IPA_RM_RESOURCE_MHI_CONS),
+};
+
+struct ipa_rm_profile_vote_type {
+ enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX];
+ enum ipa_voltage_level curr_volt;
+ u32 bw_prods[IPA_RM_RESOURCE_PROD_MAX];
+ u32 bw_cons[IPA_RM_RESOURCE_CONS_MAX];
+ u32 curr_bw;
+};
+
+struct ipa_rm_context_type {
+ struct ipa_rm_dep_graph *dep_graph;
+ struct workqueue_struct *ipa_rm_wq;
+ spinlock_t ipa_rm_lock;
+ struct ipa_rm_profile_vote_type prof_vote;
+};
+static struct ipa_rm_context_type *ipa_rm_ctx;
+
+struct ipa_rm_notify_ipa_work_type {
+ struct work_struct work;
+ enum ipa_voltage_level volt;
+ u32 bandwidth_mbps;
+};
+
+/**
+ * ipa_rm_create_resource() - create resource
+ * @create_params: [in] parameters needed
+ * for resource initialization
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * This function is called by an IPA RM client to initialize the client's
+ * resources. This API should be called before any other IPA RM API on a
+ * given resource name.
+ *
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params)
+{
+ struct ipa_rm_resource *resource;
+ unsigned long flags;
+ int result;
+
+ if (unlikely(!ipa_rm_ctx)) {
+ IPA_RM_ERR("IPA RM was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!create_params) {
+ IPA_RM_ERR("invalid args\n");
+ return -EINVAL;
+ }
+ IPA_RM_DBG("%s\n", ipa_rm_resource_str(create_params->name));
+
+ if (create_params->floor_voltage < 0 ||
+ create_params->floor_voltage >= IPA_VOLTAGE_MAX) {
+ IPA_RM_ERR("invalid voltage %d\n",
+ create_params->floor_voltage);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ create_params->name,
+ &resource) == 0) {
+ IPA_RM_ERR("resource already exists\n");
+ result = -EEXIST;
+ goto bail;
+ }
+ result = ipa_rm_resource_create(create_params,
+ &resource);
+ if (result) {
+ IPA_RM_ERR("ipa_rm_resource_create() failed\n");
+ goto bail;
+ }
+ result = ipa_rm_dep_graph_add(ipa_rm_ctx->dep_graph, resource);
+ if (result) {
+ IPA_RM_ERR("ipa_rm_dep_graph_add() failed\n");
+ ipa_rm_resource_delete(resource);
+ goto bail;
+ }
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_create_resource);
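+
+/*
+ * Illustrative sketch: creating a producer resource before any other RM
+ * call on it. Only the .name and .floor_voltage fields checked above are
+ * shown; the remaining fields of struct ipa_rm_create_params are left
+ * zeroed for this example.
+ *
+ *	struct ipa_rm_create_params create_params;
+ *
+ *	memset(&create_params, 0, sizeof(create_params));
+ *	create_params.name = IPA_RM_RESOURCE_USB_PROD;
+ *	create_params.floor_voltage = IPA_VOLTAGE_UNSPECIFIED;
+ *	ret = ipa_rm_create_resource(&create_params);
+ */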
+
+/**
+ * ipa_rm_delete_resource() - delete resource
+ * @resource_name: name of resource to be deleted
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * This function is called by an IPA RM client to delete the client's
+ * resources.
+ *
+ */
+int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name)
+{
+ struct ipa_rm_resource *resource;
+ unsigned long flags;
+ int result;
+
+ if (unlikely(!ipa_rm_ctx)) {
+ IPA_RM_ERR("IPA RM was not initialized\n");
+ return -EINVAL;
+ }
+
+ IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+ IPA_RM_ERR("resource does not exist\n");
+ result = -EINVAL;
+ goto bail;
+ }
+ result = ipa_rm_resource_delete(resource);
+ if (result) {
+ IPA_RM_ERR("ipa_rm_resource_delete() failed\n");
+ goto bail;
+ }
+ result = ipa_rm_dep_graph_remove(ipa_rm_ctx->dep_graph,
+ resource_name);
+ if (result) {
+ IPA_RM_ERR("ipa_rm_dep_graph_remove() failed\n");
+ goto bail;
+ }
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_delete_resource);
+
+static int _ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name,
+ bool userspace_dep)
+{
+ unsigned long flags;
+ int result;
+
+ if (unlikely(!ipa_rm_ctx)) {
+ IPA_RM_ERR("IPA RM was not initialized\n");
+ return -EINVAL;
+ }
+
+ IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name),
+ ipa_rm_resource_str(depends_on_name));
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ result = ipa_rm_dep_graph_add_dependency(
+ ipa_rm_ctx->dep_graph,
+ resource_name,
+ depends_on_name,
+ userspace_dep);
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+
+/**
+ * ipa_rm_add_dependency() - create dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case the client registered with IPA RM
+ */
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return _ipa_rm_add_dependency(resource_name, depends_on_name, false);
+}
+EXPORT_SYMBOL(ipa_rm_add_dependency);
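+
+/*
+ * Illustrative sketch: making the USB producer depend on the Q6 consumer,
+ * so that granting USB_PROD first requests Q6_CONS. The chosen pair is an
+ * assumption; both names appear in the resource table above.
+ *
+ *	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
+ *				    IPA_RM_RESOURCE_Q6_CONS);
+ */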
+
+/**
+ * ipa_rm_add_dependency_from_ioctl() - create dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * This function is expected to be called from an IOCTL and the dependency
+ * will be marked as if it was added by userspace.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case the client registered with IPA RM
+ */
+int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return _ipa_rm_add_dependency(resource_name, depends_on_name, true);
+}
+
+static int _ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name,
+		bool userspace_dep)
+{
+ int result;
+ struct ipa_rm_resource *consumer;
+ unsigned long time;
+ unsigned long flags;
+
+ if (unlikely(!ipa_rm_ctx)) {
+ IPA_RM_ERR("IPA RM was not initialized\n");
+ return -EINVAL;
+ }
+
+ IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name),
+ ipa_rm_resource_str(depends_on_name));
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ result = ipa_rm_dep_graph_add_dependency(
+ ipa_rm_ctx->dep_graph,
+ resource_name,
+ depends_on_name,
+						userspace_dep);
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (result == -EINPROGRESS) {
+ ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ depends_on_name,
+ &consumer);
+ IPA_RM_DBG("%s waits for GRANT of %s.\n",
+ ipa_rm_resource_str(resource_name),
+ ipa_rm_resource_str(depends_on_name));
+ time = wait_for_completion_timeout(
+ &((struct ipa_rm_resource_cons *)consumer)->
+ request_consumer_in_progress,
+ HZ);
+ result = 0;
+		if (!time) {
+			IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.\n",
+				ipa_rm_resource_str(depends_on_name));
+			result = -ETIME;
+		}
+		IPA_RM_DBG("%s wait for %s GRANT done, %lu jiffies left\n",
+			ipa_rm_resource_str(resource_name),
+			ipa_rm_resource_str(depends_on_name),
+			time);
+ }
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+
+/**
+ * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources
+ * in a synchronized fashion. In case a producer resource is in GRANTED state
+ * and the newly added consumer resource is in RELEASED state, the consumer
+ * entity will be requested and the function will block until the consumer
+ * is granted.
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: May block. See documentation above.
+ */
+int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return _ipa_rm_add_dependency_sync(resource_name, depends_on_name,
+ false);
+}
+EXPORT_SYMBOL(ipa_rm_add_dependency_sync);
+
+/**
+ * ipa_rm_add_dependency_sync_from_ioctl() - Create a dependency between 2
+ * resources in a synchronized fashion. In case a producer resource is in
+ * GRANTED state and the newly added consumer resource is in RELEASED state,
+ * the consumer entity will be requested and the function will block until
+ * the consumer is granted.
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * This function is expected to be called from an IOCTL and the dependency
+ * will be marked as if it was added by userspace.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: May block. See documentation above.
+ */
+int ipa_rm_add_dependency_sync_from_ioctl(
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return _ipa_rm_add_dependency_sync(resource_name, depends_on_name,
+ true);
+}
+
+static int _ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name,
+ bool userspace_dep)
+{
+ unsigned long flags;
+ int result;
+
+ if (unlikely(!ipa_rm_ctx)) {
+ IPA_RM_ERR("IPA RM was not initialized\n");
+ return -EINVAL;
+ }
+
+ IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name),
+ ipa_rm_resource_str(depends_on_name));
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ result = ipa_rm_dep_graph_delete_dependency(
+ ipa_rm_ctx->dep_graph,
+ resource_name,
+ depends_on_name,
+ userspace_dep);
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+
+/**
+ * ipa_rm_delete_dependency() - delete dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case the client registered with IPA RM
+ */
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return _ipa_rm_delete_dependency(resource_name, depends_on_name, false);
+}
+EXPORT_SYMBOL(ipa_rm_delete_dependency);
+
+/**
+ * ipa_rm_delete_dependency_from_ioctl() - delete dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * This function is expected to be called from an IOCTL and the dependency
+ * will be marked as if it was added by userspace.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case the client registered with IPA RM
+ */
+int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return _ipa_rm_delete_dependency(resource_name, depends_on_name, true);
+}
+
+/**
+ * ipa_rm_request_resource() - request resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED
+ * on successful completion of this operation.
+ */
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name)
+{
+ struct ipa_rm_resource *resource;
+ unsigned long flags;
+ int result;
+
+ if (unlikely(!ipa_rm_ctx)) {
+ IPA_RM_ERR("IPA RM was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+ IPA_RM_ERR("can be called on PROD only\n");
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_producer_request(
+ (struct ipa_rm_resource_prod *)resource);
+
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_request_resource);
+
+void delayed_release_work_func(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ipa_rm_resource *resource;
+ struct ipa_rm_delayed_release_work_type *rwork = container_of(
+ to_delayed_work(work),
+ struct ipa_rm_delayed_release_work_type,
+ work);
+
+ if (!IPA_RM_RESORCE_IS_CONS(rwork->resource_name)) {
+ IPA_RM_ERR("can be called on CONS only\n");
+ kfree(rwork);
+ return;
+ }
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ rwork->resource_name,
+ &resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ goto bail;
+ }
+
+ ipa_rm_resource_consumer_release(
+ (struct ipa_rm_resource_cons *)resource, rwork->needed_bw,
+ rwork->dec_usage_count);
+
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ kfree(rwork);
+}
+
+/**
+ * ipa_rm_request_resource_with_timer() - requests the specified consumer
+ * resource and releases it after 1 second
+ * @resource_name: name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name)
+{
+ unsigned long flags;
+ struct ipa_rm_resource *resource;
+ struct ipa_rm_delayed_release_work_type *release_work;
+ int result;
+
+ if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
+ IPA_RM_ERR("can be called on CONS only\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_consumer_request(
+ (struct ipa_rm_resource_cons *)resource, 0, false, true);
+ if (result != 0 && result != -EINPROGRESS) {
+ IPA_RM_ERR("consumer request returned error %d\n", result);
+ result = -EPERM;
+ goto bail;
+ }
+
+ release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC);
+ if (!release_work) {
+ result = -ENOMEM;
+ goto bail;
+ }
+ release_work->resource_name = resource->name;
+ release_work->needed_bw = 0;
+ release_work->dec_usage_count = false;
+ INIT_DELAYED_WORK(&release_work->work, delayed_release_work_func);
+ schedule_delayed_work(&release_work->work,
+ msecs_to_jiffies(IPA_RM_RELEASE_DELAY_IN_MSEC));
+ result = 0;
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+ return result;
+}
+
+/**
+ * ipa_rm_release_resource() - release resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED
+ * on successful completion of this operation.
+ */
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name)
+{
+ unsigned long flags;
+ struct ipa_rm_resource *resource;
+ int result;
+
+ if (unlikely(!ipa_rm_ctx)) {
+ IPA_RM_ERR("IPA RM was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+ IPA_RM_ERR("can be called on PROD only\n");
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_producer_release(
+ (struct ipa_rm_resource_prod *)resource);
+
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_release_resource);
+
+/**
+ * ipa_rm_register() - register for event
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as those provided later in the ipa_rm_deregister() call.
+ */
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ int result;
+ unsigned long flags;
+ struct ipa_rm_resource *resource;
+
+ IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+
+ if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+ IPA_RM_ERR("can be called on PROD only\n");
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_producer_register(
+ (struct ipa_rm_resource_prod *)resource,
+ reg_params,
+ true);
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_register);
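+
+/*
+ * Illustrative sketch: registering a notification callback on a producer.
+ * The .user_data and .notify_cb field names of struct
+ * ipa_rm_register_params are assumptions here (the struct is defined
+ * elsewhere), as is the callback symbol.
+ *
+ *	struct ipa_rm_register_params reg_params;
+ *
+ *	memset(&reg_params, 0, sizeof(reg_params));
+ *	reg_params.user_data = priv;
+ *	reg_params.notify_cb = usb_rm_notify_cb;
+ *	ret = ipa_rm_register(IPA_RM_RESOURCE_USB_PROD, &reg_params);
+ */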
+
+/**
+ * ipa_rm_deregister() - cancel the registration
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as those provided in the ipa_rm_register() call.
+ */
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ int result;
+ unsigned long flags;
+ struct ipa_rm_resource *resource;
+
+ IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+
+ if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+ IPA_RM_ERR("can be called on PROD only\n");
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_producer_deregister(
+ (struct ipa_rm_resource_prod *)resource,
+ reg_params);
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_deregister);
+
+/**
+ * ipa_rm_set_perf_profile() - set performance profile
+ * @resource_name: resource name
+ * @profile: [in] profile information.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Set resource performance profile.
+ * Updates IPA driver if performance level changed.
+ */
+int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_perf_profile *profile)
+{
+ int result;
+ unsigned long flags;
+ struct ipa_rm_resource *resource;
+
+ if (unlikely(!ipa_rm_ctx)) {
+ IPA_RM_ERR("IPA RM was not initialized\n");
+ return -EINVAL;
+ }
+
+ IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+ if (profile)
+ IPA_RM_DBG("BW: %d\n", profile->max_supported_bandwidth_mbps);
+
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_set_perf_profile(resource, profile);
+ if (result) {
+ IPA_RM_ERR("ipa_rm_resource_set_perf_profile failed %d\n",
+ result);
+ goto bail;
+ }
+
+ result = 0;
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_set_perf_profile);
+
+/**
+ * ipa_rm_notify_completion() - consumer driver notification of
+ * request_resource / release_resource operation completion
+ * @event: notified event
+ * @resource_name: resource name
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+ enum ipa_rm_resource_name resource_name)
+{
+ int result;
+
+ if (unlikely(!ipa_rm_ctx)) {
+ IPA_RM_ERR("IPA RM was not initialized\n");
+ return -EINVAL;
+ }
+
+ IPA_RM_DBG("event %d on %s\n", event,
+ ipa_rm_resource_str(resource_name));
+ if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
+ IPA_RM_ERR("can be called on CONS only\n");
+ result = -EINVAL;
+ goto bail;
+ }
+ ipa_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB,
+ resource_name,
+ event,
+ false);
+ result = 0;
+bail:
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_notify_completion);
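+
+/*
+ * Illustrative sketch: a consumer driver whose request was completed
+ * asynchronously reports the grant once the resource is actually up. The
+ * surrounding condition is an assumption about the consumer's flow.
+ *
+ *	if (consumer_is_up)
+ *		ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+ *					 IPA_RM_RESOURCE_USB_CONS);
+ */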
+
+static void ipa_rm_wq_handler(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ipa_rm_resource *resource;
+ struct ipa_rm_wq_work_type *ipa_rm_work =
+ container_of(work,
+ struct ipa_rm_wq_work_type,
+ work);
+ IPA_RM_DBG_LOW("%s cmd=%d event=%d notify_registered_only=%d\n",
+ ipa_rm_resource_str(ipa_rm_work->resource_name),
+ ipa_rm_work->wq_cmd,
+ ipa_rm_work->event,
+ ipa_rm_work->notify_registered_only);
+ switch (ipa_rm_work->wq_cmd) {
+ case IPA_RM_WQ_NOTIFY_PROD:
+ if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name)) {
+ IPA_RM_ERR("resource is not PROD\n");
+ goto free_work;
+ }
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ ipa_rm_work->resource_name,
+						&resource) != 0) {
+			IPA_RM_ERR("resource does not exist\n");
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ goto free_work;
+ }
+ ipa_rm_resource_producer_notify_clients(
+ (struct ipa_rm_resource_prod *)resource,
+ ipa_rm_work->event,
+ ipa_rm_work->notify_registered_only);
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ break;
+ case IPA_RM_WQ_NOTIFY_CONS:
+ break;
+ case IPA_RM_WQ_RESOURCE_CB:
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ ipa_rm_work->resource_name,
+						&resource) != 0) {
+			IPA_RM_ERR("resource does not exist\n");
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ goto free_work;
+ }
+ ipa_rm_resource_consumer_handle_cb(
+ (struct ipa_rm_resource_cons *)resource,
+ ipa_rm_work->event);
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ break;
+ default:
+ break;
+ }
+
+free_work:
+ kfree((void *) work);
+}
+
+static void ipa_rm_wq_resume_handler(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ipa_rm_resource *resource;
+ struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work =
+ container_of(work,
+ struct ipa_rm_wq_suspend_resume_work_type,
+ work);
+ IPA_RM_DBG_LOW("resume work handler: %s",
+ ipa_rm_resource_str(ipa_rm_work->resource_name));
+
+ if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) {
+ IPA_RM_ERR("resource is not CONS\n");
+ return;
+ }
+ IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str(
+ ipa_rm_work->resource_name));
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ ipa_rm_work->resource_name,
+					&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(
+ ipa_rm_work->resource_name));
+ goto bail;
+ }
+ ipa_rm_resource_consumer_request_work(
+ (struct ipa_rm_resource_cons *)resource,
+ ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true);
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+bail:
+ kfree(ipa_rm_work);
+}
+
+
+static void ipa_rm_wq_suspend_handler(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ipa_rm_resource *resource;
+ struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work =
+ container_of(work,
+ struct ipa_rm_wq_suspend_resume_work_type,
+ work);
+ IPA_RM_DBG_LOW("suspend work handler: %s",
+ ipa_rm_resource_str(ipa_rm_work->resource_name));
+
+ if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) {
+ IPA_RM_ERR("resource is not CONS\n");
+ return;
+ }
+ ipa_suspend_resource_sync(ipa_rm_work->resource_name);
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ ipa_rm_work->resource_name,
+					&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+ return;
+ }
+ ipa_rm_resource_consumer_release_work(
+ (struct ipa_rm_resource_cons *)resource,
+ ipa_rm_work->prev_state,
+ true);
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+ kfree(ipa_rm_work);
+}
+
+/**
+ * ipa_rm_wq_send_cmd() - send a command for deferred work
+ * @wq_cmd: command that should be executed
+ * @resource_name: resource on which command should be executed
+ * @event: event to notify the clients about
+ * @notify_registered_only: notify only clients registered by
+ *	ipa_rm_register()
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_event event,
+ bool notify_registered_only)
+{
+ int result = -ENOMEM;
+ struct ipa_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_ATOMIC);
+
+ if (work) {
+ INIT_WORK((struct work_struct *)work, ipa_rm_wq_handler);
+ work->wq_cmd = wq_cmd;
+ work->resource_name = resource_name;
+ work->event = event;
+ work->notify_registered_only = notify_registered_only;
+ result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+ (struct work_struct *)work);
+ } else {
+ IPA_RM_ERR("no mem\n");
+ }
+
+ return result;
+}
+
+int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_state prev_state,
+ u32 needed_bw)
+{
+ int result = -ENOMEM;
+ struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work),
+ GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *)work,
+ ipa_rm_wq_suspend_handler);
+ work->resource_name = resource_name;
+ work->prev_state = prev_state;
+ work->needed_bw = needed_bw;
+ result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+ (struct work_struct *)work);
+ } else {
+ IPA_RM_ERR("no mem\n");
+ }
+
+ return result;
+}
+
+int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_state prev_state,
+ u32 needed_bw)
+{
+ int result = -ENOMEM;
+ struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work),
+ GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *)work, ipa_rm_wq_resume_handler);
+ work->resource_name = resource_name;
+ work->prev_state = prev_state;
+ work->needed_bw = needed_bw;
+ result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+ (struct work_struct *)work);
+ } else {
+ IPA_RM_ERR("no mem\n");
+ }
+
+ return result;
+}
+
+/**
+ * ipa_rm_initialize() - initialize IPA RM component
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int ipa_rm_initialize(void)
+{
+ int result;
+
+ ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL);
+ if (!ipa_rm_ctx) {
+ IPA_RM_ERR("no mem\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq");
+ if (!ipa_rm_ctx->ipa_rm_wq) {
+ IPA_RM_ERR("create workqueue failed\n");
+ result = -ENOMEM;
+ goto create_wq_fail;
+ }
+ result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph));
+ if (result) {
+ IPA_RM_ERR("create dependency graph failed\n");
+ goto graph_alloc_fail;
+ }
+ spin_lock_init(&ipa_rm_ctx->ipa_rm_lock);
+ IPA_RM_DBG("SUCCESS\n");
+
+ return 0;
+graph_alloc_fail:
+ destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+create_wq_fail:
+ kfree(ipa_rm_ctx);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_stat() - print RM statistics
+ * @buf: [in] the buffer to print into
+ * @size: [in] the size of buf
+ * Returns: number of bytes used on success, negative on failure
+ *
+ * This function is called by ipa_debugfs in order to receive
+ * a full picture of the current state of the RM
+ */
+int ipa_rm_stat(char *buf, int size)
+{
+ unsigned long flags;
+	int i, cnt = 0, result = -EINVAL;
+ struct ipa_rm_resource *resource = NULL;
+ u32 sum_bw_prod = 0;
+ u32 sum_bw_cons = 0;
+
+ if (!buf || size < 0)
+ return result;
+
+ spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+ for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; ++i) {
+ result = ipa_rm_dep_graph_get_resource(
+ ipa_rm_ctx->dep_graph,
+ i,
+ &resource);
+ if (!result) {
+ result = ipa_rm_resource_producer_print_stat(
+ resource, buf + cnt,
+ size-cnt);
+ if (result < 0)
+ goto bail;
+ cnt += result;
+ }
+ }
+
+ for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++)
+ sum_bw_prod += ipa_rm_ctx->prof_vote.bw_prods[i];
+
+ for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++)
+ sum_bw_cons += ipa_rm_ctx->prof_vote.bw_cons[i];
+
+ result = scnprintf(buf + cnt, size - cnt,
+ "All prod bandwidth: %d, All cons bandwidth: %d\n",
+ sum_bw_prod, sum_bw_cons);
+ cnt += result;
+
+ result = scnprintf(buf + cnt, size - cnt,
+ "Voting: voltage %d, bandwidth %d\n",
+ ipa_rm_ctx->prof_vote.curr_volt,
+ ipa_rm_ctx->prof_vote.curr_bw);
+ cnt += result;
+
+ result = cnt;
+bail:
+ spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+ return result;
+}
+
+/**
+ * ipa_rm_resource_str() - returns string that represent the resource
+ * @resource_name: [in] resource name
+ */
+const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name)
+{
+ if (resource_name < 0 || resource_name >= IPA_RM_RESOURCE_MAX)
+ return "INVALID RESOURCE";
+
+	return resource_name_to_str[resource_name];
+}
+
+static void ipa_rm_perf_profile_notify_to_ipa_work(struct work_struct *work)
+{
+ struct ipa_rm_notify_ipa_work_type *notify_work = container_of(work,
+ struct ipa_rm_notify_ipa_work_type,
+ work);
+ int res;
+
+ IPA_RM_DBG_LOW("calling to IPA driver. voltage %d bandwidth %d\n",
+ notify_work->volt, notify_work->bandwidth_mbps);
+
+ res = ipa_set_required_perf_profile(notify_work->volt,
+ notify_work->bandwidth_mbps);
+ if (res) {
+ IPA_RM_ERR("ipa_set_required_perf_profile failed %d\n", res);
+ goto bail;
+ }
+
+ IPA_RM_DBG_LOW("IPA driver notified\n");
+bail:
+ kfree(notify_work);
+}
+
+static void ipa_rm_perf_profile_notify_to_ipa(enum ipa_voltage_level volt,
+ u32 bandwidth)
+{
+ struct ipa_rm_notify_ipa_work_type *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ IPA_RM_ERR("no mem\n");
+ return;
+ }
+
+ INIT_WORK(&work->work, ipa_rm_perf_profile_notify_to_ipa_work);
+ work->volt = volt;
+ work->bandwidth_mbps = bandwidth;
+ queue_work(ipa_rm_ctx->ipa_rm_wq, &work->work);
+}
+
+/**
+ * ipa_rm_perf_profile_change() - change performance profile vote for resource
+ * @resource_name: [in] resource name
+ *
+ * Change the bandwidth and voltage vote based on the resource state.
+ */
+void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name)
+{
+ enum ipa_voltage_level old_volt;
+ u32 *bw_ptr;
+ u32 old_bw;
+ struct ipa_rm_resource *resource;
+ int i;
+ u32 sum_bw_prod = 0;
+ u32 sum_bw_cons = 0;
+
+ IPA_RM_DBG_LOW("%s\n", ipa_rm_resource_str(resource_name));
+
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+ WARN_ON(1);
+ return;
+ }
+
+ old_volt = ipa_rm_ctx->prof_vote.curr_volt;
+ old_bw = ipa_rm_ctx->prof_vote.curr_bw;
+
+ if (IPA_RM_RESORCE_IS_PROD(resource_name)) {
+ bw_ptr = &ipa_rm_ctx->prof_vote.bw_prods[resource_name];
+ } else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
+ bw_ptr = &ipa_rm_ctx->prof_vote.bw_cons[
+ resource_name - IPA_RM_RESOURCE_PROD_MAX];
+ } else {
+ IPA_RM_ERR("Invalid resource_name\n");
+ return;
+ }
+
+ switch (resource->state) {
+ case IPA_RM_GRANTED:
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ IPA_RM_DBG_LOW("max_bw = %d, needed_bw = %d\n",
+ resource->max_bw, resource->needed_bw);
+ *bw_ptr = min(resource->max_bw, resource->needed_bw);
+ ipa_rm_ctx->prof_vote.volt[resource_name] =
+ resource->floor_voltage;
+ break;
+
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ case IPA_RM_RELEASED:
+ *bw_ptr = 0;
+ ipa_rm_ctx->prof_vote.volt[resource_name] = 0;
+ break;
+
+ default:
+ IPA_RM_ERR("unknown state %d\n", resource->state);
+ WARN_ON(1);
+ return;
+ }
+ IPA_RM_DBG_LOW("resource bandwidth: %d voltage: %d\n", *bw_ptr,
+ resource->floor_voltage);
+
+ ipa_rm_ctx->prof_vote.curr_volt = IPA_VOLTAGE_UNSPECIFIED;
+ for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+ if (ipa_rm_ctx->prof_vote.volt[i] >
+ ipa_rm_ctx->prof_vote.curr_volt) {
+ ipa_rm_ctx->prof_vote.curr_volt =
+ ipa_rm_ctx->prof_vote.volt[i];
+ }
+ }
+
+ for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++)
+ sum_bw_prod += ipa_rm_ctx->prof_vote.bw_prods[i];
+
+ for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++)
+ sum_bw_cons += ipa_rm_ctx->prof_vote.bw_cons[i];
+
+ IPA_RM_DBG_LOW("all prod bandwidth: %d all cons bandwidth: %d\n",
+ sum_bw_prod, sum_bw_cons);
+ ipa_rm_ctx->prof_vote.curr_bw = min(sum_bw_prod, sum_bw_cons);
+
+ if (ipa_rm_ctx->prof_vote.curr_volt == old_volt &&
+ ipa_rm_ctx->prof_vote.curr_bw == old_bw) {
+ IPA_RM_DBG_LOW("same voting\n");
+ return;
+ }
+
+ IPA_RM_DBG_LOW("new voting: voltage %d bandwidth %d\n",
+ ipa_rm_ctx->prof_vote.curr_volt,
+ ipa_rm_ctx->prof_vote.curr_bw);
+
+ ipa_rm_perf_profile_notify_to_ipa(ipa_rm_ctx->prof_vote.curr_volt,
+ ipa_rm_ctx->prof_vote.curr_bw);
+}
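+
+/*
+ * Worked example of the aggregation above (numbers are hypothetical):
+ * two producers voting 100 and 200 Mbps sum to 300, two consumers
+ * voting 150 and 100 Mbps sum to 250, so the bandwidth vote sent to
+ * the IPA driver is min(300, 250) = 250 Mbps; the voltage vote is the
+ * maximum voltage requested across all resources.
+ */
+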
+/**
+ * ipa_rm_exit() - free all IPA RM resources
+ */
+void ipa_rm_exit(void)
+{
+ IPA_RM_DBG("ENTER\n");
+ ipa_rm_dep_graph_delete(ipa_rm_ctx->dep_graph);
+ destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+ kfree(ipa_rm_ctx);
+ ipa_rm_ctx = NULL;
+ IPA_RM_DBG("EXIT\n");
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
new file mode 100644
index 0000000..54cad88
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
@@ -0,0 +1,251 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+
+static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name)
+{
+ int resource_index = IPA_RM_INDEX_INVALID;
+
+ if (IPA_RM_RESORCE_IS_PROD(resource_name))
+ resource_index = ipa_rm_prod_index(resource_name);
+ else if (IPA_RM_RESORCE_IS_CONS(resource_name))
+ resource_index = ipa_rm_cons_index(resource_name);
+
+ return resource_index;
+}
+
+/**
+ * ipa_rm_dep_graph_create() - creates graph
+ * @dep_graph: [out] created dependency graph
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph)
+{
+ int result = 0;
+
+ *dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL);
+ if (!*dep_graph) {
+ IPA_RM_ERR("no mem\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_dep_graph_delete() - destroys the graph
+ * @graph: [in] dependency graph
+ *
+ * Frees all resources.
+ */
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph)
+{
+ int resource_index;
+
+ if (!graph) {
+ IPA_RM_ERR("invalid params\n");
+ return;
+ }
+ for (resource_index = 0;
+ resource_index < IPA_RM_RESOURCE_MAX;
+ resource_index++)
+ kfree(graph->resource_table[resource_index]);
+ memset(graph->resource_table, 0, sizeof(graph->resource_table));
+}
+
+/**
+ * ipa_rm_dep_graph_get_resource() - provides a resource by name
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the resource
+ * @resource: [out] resource in case of success
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_get_resource(
+ struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_resource **resource)
+{
+ int result;
+ int resource_index;
+
+ if (!graph) {
+ result = -EINVAL;
+ goto bail;
+ }
+ resource_index = ipa_rm_dep_get_index(resource_name);
+ if (resource_index == IPA_RM_INDEX_INVALID) {
+ result = -EINVAL;
+ goto bail;
+ }
+ *resource = graph->resource_table[resource_index];
+ if (!*resource) {
+ result = -EINVAL;
+ goto bail;
+ }
+ result = 0;
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_dep_graph_add() - adds resource to graph
+ * @graph: [in] dependency graph
+ * @resource: [in] resource to add
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+ struct ipa_rm_resource *resource)
+{
+ int result = 0;
+ int resource_index;
+
+ if (!graph || !resource) {
+ result = -EINVAL;
+ goto bail;
+ }
+ resource_index = ipa_rm_dep_get_index(resource->name);
+ if (resource_index == IPA_RM_INDEX_INVALID) {
+ result = -EINVAL;
+ goto bail;
+ }
+ graph->resource_table[resource_index] = resource;
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_dep_graph_remove() - removes resource from graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the resource to remove
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name)
+{
+ if (!graph)
+ return -EINVAL;
+ graph->resource_table[resource_name] = NULL;
+
+ return 0;
+}
+
+/**
+ * ipa_rm_dep_graph_add_dependency() - adds dependency between
+ * two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] resource that gets the new dependency
+ * @depends_on_name: [in] resource it will depend on
+ * @userspace_dep: [in] true if the operation is requested by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name,
+ bool userspace_dep)
+{
+ struct ipa_rm_resource *dependent = NULL;
+ struct ipa_rm_resource *dependency = NULL;
+ int result;
+
+ if (!graph ||
+ !IPA_RM_RESORCE_IS_PROD(resource_name) ||
+ !IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+ IPA_RM_ERR("invalid params\n");
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(graph,
+ resource_name,
+ &dependent)) {
+ IPA_RM_ERR("%s does not exist\n",
+ ipa_rm_resource_str(resource_name));
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(graph,
+ depends_on_name,
+ &dependency)) {
+ IPA_RM_ERR("%s does not exist\n",
+ ipa_rm_resource_str(depends_on_name));
+ result = -EINVAL;
+ goto bail;
+ }
+ result = ipa_rm_resource_add_dependency(dependent, dependency,
+ userspace_dep);
+bail:
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
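+
+/*
+ * Illustrative usage sketch (hypothetical caller; assumes both resources
+ * were already created and added to this graph):
+ *
+ *	if (ipa_rm_dep_graph_add_dependency(graph,
+ *			IPA_RM_RESOURCE_USB_PROD,
+ *			IPA_RM_RESOURCE_Q6_CONS,
+ *			false))
+ *		IPA_RM_ERR("USB_PROD->Q6_CONS dependency failed\n");
+ */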
+
+/**
+ * ipa_rm_dep_graph_delete_dependency() - deletes a dependency between
+ * two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] resource whose dependency is deleted
+ * @depends_on_name: [in] resource it depends on
+ * @userspace_dep: [in] true if the operation is requested by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name,
+ bool userspace_dep)
+{
+ struct ipa_rm_resource *dependent = NULL;
+ struct ipa_rm_resource *dependency = NULL;
+ int result;
+
+ if (!graph ||
+ !IPA_RM_RESORCE_IS_PROD(resource_name) ||
+ !IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+ IPA_RM_ERR("invalid params\n");
+ result = -EINVAL;
+ goto bail;
+ }
+
+ if (ipa_rm_dep_graph_get_resource(graph,
+ resource_name,
+ &dependent)) {
+ IPA_RM_ERR("%s does not exist\n",
+ ipa_rm_resource_str(resource_name));
+ result = -EINVAL;
+ goto bail;
+ }
+
+ if (ipa_rm_dep_graph_get_resource(graph,
+ depends_on_name,
+ &dependency)) {
+ IPA_RM_ERR("%s does not exist\n",
+ ipa_rm_resource_str(depends_on_name));
+ result = -EINVAL;
+ goto bail;
+ }
+
+ result = ipa_rm_resource_delete_dependency(dependent, dependency,
+ userspace_dep);
+bail:
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
new file mode 100644
index 0000000..e322d81
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_
+#define _IPA_RM_DEPENDENCY_GRAPH_H_
+
+#include <linux/list.h>
+#include <linux/ipa.h>
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_dep_graph {
+ struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX];
+};
+
+int ipa_rm_dep_graph_get_resource(
+ struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name name,
+ struct ipa_rm_resource **resource);
+
+int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph);
+
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph);
+
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+ struct ipa_rm_resource *resource);
+
+int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name,
+				bool userspace_dep);
+
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name,
+				bool userspace_dep);
+
+#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h
new file mode 100644
index 0000000..eb86c54
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_i.h
@@ -0,0 +1,157 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_I_H_
+#define _IPA_RM_I_H_
+
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include "ipa_rm_resource.h"
+#include "ipa_common_i.h"
+
+#define IPA_RM_DRV_NAME "ipa_rm"
+
+#define IPA_RM_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+	} while (0)
+
+#define IPA_RM_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+	} while (0)
+
+#define IPA_RM_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+	} while (0)
+
+#define IPA_RM_RESOURCE_CONS_MAX \
+ (IPA_RM_RESOURCE_MAX - IPA_RM_RESOURCE_PROD_MAX)
+#define IPA_RM_RESORCE_IS_PROD(x) \
+	((x) >= IPA_RM_RESOURCE_PROD && (x) < IPA_RM_RESOURCE_PROD_MAX)
+#define IPA_RM_RESORCE_IS_CONS(x) \
+	((x) >= IPA_RM_RESOURCE_PROD_MAX && (x) < IPA_RM_RESOURCE_MAX)
+#define IPA_RM_INDEX_INVALID (-1)
+#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000
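+
+/*
+ * Example of the name-space layout encoded by the macros above:
+ * producers occupy [IPA_RM_RESOURCE_PROD, IPA_RM_RESOURCE_PROD_MAX) and
+ * consumers occupy [IPA_RM_RESOURCE_PROD_MAX, IPA_RM_RESOURCE_MAX), so a
+ * consumer's index into a consumer-only array (e.g. bw_cons) is
+ * (resource_name - IPA_RM_RESOURCE_PROD_MAX).
+ */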
+
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name);
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name);
+
+/**
+ * struct ipa_rm_delayed_release_work_type - IPA RM delayed resource release
+ * work type
+ * @delayed_work: work struct
+ * @resource_name: name of the resource on which this work should be done
+ * @needed_bw: bandwidth required for resource in Mbps
+ * @dec_usage_count: whether to decrease the usage count on release
+ */
+struct ipa_rm_delayed_release_work_type {
+ struct delayed_work work;
+ enum ipa_rm_resource_name resource_name;
+ u32 needed_bw;
+	bool dec_usage_count;
+};
+
+/**
+ * enum ipa_rm_wq_cmd - workqueue commands
+ */
+enum ipa_rm_wq_cmd {
+ IPA_RM_WQ_NOTIFY_PROD,
+ IPA_RM_WQ_NOTIFY_CONS,
+ IPA_RM_WQ_RESOURCE_CB
+};
+
+/**
+ * struct ipa_rm_wq_work_type - IPA RM workqueue specific
+ * work type
+ * @work: work struct
+ * @wq_cmd: command that should be processed in workqueue context
+ * @resource_name: name of the resource on which this work
+ * should be done
+ * @event: event to notify
+ * @notify_registered_only: notify only clients registered by
+ * ipa_rm_register()
+ */
+struct ipa_rm_wq_work_type {
+ struct work_struct work;
+ enum ipa_rm_wq_cmd wq_cmd;
+ enum ipa_rm_resource_name resource_name;
+ enum ipa_rm_event event;
+ bool notify_registered_only;
+};
+
+/**
+ * struct ipa_rm_wq_suspend_resume_work_type - IPA RM workqueue resume or
+ * suspend work type
+ * @work: work struct
+ * @resource_name: name of the resource on which this work
+ * should be done
+ * @prev_state: resource state before the suspend/resume request
+ * @needed_bw: bandwidth required for the resource in Mbps
+ */
+struct ipa_rm_wq_suspend_resume_work_type {
+ struct work_struct work;
+ enum ipa_rm_resource_name resource_name;
+ enum ipa_rm_resource_state prev_state;
+	u32 needed_bw;
+};
+
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_event event,
+ bool notify_registered_only);
+
+int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_state prev_state,
+ u32 needed_bw);
+
+int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_state prev_state,
+ u32 needed_bw);
+
+int ipa_rm_initialize(void);
+
+int ipa_rm_stat(char *buf, int size);
+
+const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name);
+
+void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name);
+
+void delayed_release_work_func(struct work_struct *work);
+
+int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+void ipa_rm_exit(void);
+
+#endif /* _IPA_RM_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
new file mode 100644
index 0000000..8e33d71
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
@@ -0,0 +1,273 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/unistd.h>
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include "ipa_rm_i.h"
+
+/**
+ * struct ipa_rm_it_private - IPA RM Inactivity Timer private
+ * data
+ * @initied: indicates if instance was initialized
+ * @lock: spinlock for mutual exclusion
+ * @resource_name: resource name
+ * @work: delayed work object for running the delayed release
+ * function
+ * @resource_requested: boolean flag indicates if resource was requested
+ * @reschedule_work: boolean flag indicates to not release and to
+ * reschedule the release work.
+ * @work_in_progress: boolean flag indicates if release work was scheduled.
+ * @jiffies: number of jiffies for timeout
+ */
+struct ipa_rm_it_private {
+ bool initied;
+ enum ipa_rm_resource_name resource_name;
+ spinlock_t lock;
+ struct delayed_work work;
+ bool resource_requested;
+ bool reschedule_work;
+ bool work_in_progress;
+ unsigned long jiffies;
+};
+
+static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX];
+
+/**
+ * ipa_rm_inactivity_timer_func() - called when the timer expires, in
+ * the context of the shared workqueue. Checks internally if the
+ * reschedule_work flag is set. If it is not set, this function calls
+ * ipa_rm_release_resource(); if it is set, this function reschedules
+ * the work. The flag is cleared when
+ * ipa_rm_inactivity_timer_release_resource() is called.
+ *
+ * @work: work object provided by the work queue
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_inactivity_timer_func(struct work_struct *work)
+{
+ struct ipa_rm_it_private *me = container_of(to_delayed_work(work),
+ struct ipa_rm_it_private,
+ work);
+ unsigned long flags;
+
+ IPA_RM_DBG_LOW("%s: timer expired for resource %d!\n", __func__,
+ me->resource_name);
+
+ spin_lock_irqsave(
+ &ipa_rm_it_handles[me->resource_name].lock, flags);
+ if (ipa_rm_it_handles[me->resource_name].reschedule_work) {
+ IPA_RM_DBG_LOW("%s: setting delayed work\n", __func__);
+ ipa_rm_it_handles[me->resource_name].reschedule_work = false;
+ queue_delayed_work(system_unbound_wq,
+ &ipa_rm_it_handles[me->resource_name].work,
+ ipa_rm_it_handles[me->resource_name].jiffies);
+ } else if (ipa_rm_it_handles[me->resource_name].resource_requested) {
+ IPA_RM_DBG_LOW("%s: not calling release\n", __func__);
+ ipa_rm_it_handles[me->resource_name].work_in_progress = false;
+ } else {
+ IPA_RM_DBG_LOW("%s: calling release_resource on resource %d!\n",
+ __func__, me->resource_name);
+ ipa_rm_release_resource(me->resource_name);
+ ipa_rm_it_handles[me->resource_name].work_in_progress = false;
+ }
+ spin_unlock_irqrestore(
+ &ipa_rm_it_handles[me->resource_name].lock, flags);
+}
+
+/**
+* ipa_rm_inactivity_timer_init() - Init function for IPA RM
+* inactivity timer. This function shall be called prior to calling
+* any other API of IPA RM inactivity timer.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+* @msecs: time in milliseconds that IPA RM inactivity timer
+* shall wait before calling ipa_rm_release_resource().
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+ unsigned long msecs)
+{
+ IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
+
+ if (resource_name < 0 ||
+ resource_name >= IPA_RM_RESOURCE_MAX) {
+ IPA_RM_ERR("%s: Invalid parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ipa_rm_it_handles[resource_name].initied) {
+ IPA_RM_ERR("%s: resource %d already inited\n",
+ __func__, resource_name);
+ return -EINVAL;
+ }
+
+ spin_lock_init(&ipa_rm_it_handles[resource_name].lock);
+ ipa_rm_it_handles[resource_name].resource_name = resource_name;
+ ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs);
+ ipa_rm_it_handles[resource_name].resource_requested = false;
+ ipa_rm_it_handles[resource_name].reschedule_work = false;
+ ipa_rm_it_handles[resource_name].work_in_progress = false;
+
+ INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work,
+ ipa_rm_inactivity_timer_func);
+	ipa_rm_it_handles[resource_name].initied = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_init);
+
+/**
+* ipa_rm_inactivity_timer_destroy() - De-Init function for IPA
+* RM inactivity timer.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
+{
+ IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
+
+ if (resource_name < 0 ||
+ resource_name >= IPA_RM_RESOURCE_MAX) {
+ IPA_RM_ERR("%s: Invalid parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!ipa_rm_it_handles[resource_name].initied) {
+		IPA_RM_ERR("%s: resource %d not initialized\n",
+ __func__, resource_name);
+ return -EINVAL;
+ }
+
+ cancel_delayed_work_sync(&ipa_rm_it_handles[resource_name].work);
+
+ memset(&ipa_rm_it_handles[resource_name], 0,
+ sizeof(struct ipa_rm_it_private));
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy);
+
+/**
+* ipa_rm_inactivity_timer_request_resource() - Same as
+* ipa_rm_request_resource(), with the difference that calling
+* this function will also cancel the inactivity timer, if
+* ipa_rm_inactivity_timer_release_resource() was called earlier.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_request_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ int ret;
+ unsigned long flags;
+
+ IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
+
+ if (resource_name < 0 ||
+ resource_name >= IPA_RM_RESOURCE_MAX) {
+ IPA_RM_ERR("%s: Invalid parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!ipa_rm_it_handles[resource_name].initied) {
+ IPA_RM_ERR("%s: Not initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+ ipa_rm_it_handles[resource_name].resource_requested = true;
+ spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+ ret = ipa_rm_request_resource(resource_name);
+ IPA_RM_DBG_LOW("%s: resource %d: returning %d\n", __func__,
+ resource_name, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource);
+
+/**
+* ipa_rm_inactivity_timer_release_resource() - Sets the
+* inactivity timer to the timeout set by
+* ipa_rm_inactivity_timer_init(). When the timeout expires, IPA
+* RM inactivity timer will call ipa_rm_release_resource().
+* If a call to ipa_rm_inactivity_timer_request_resource() was
+* made BEFORE the timeout has expired, the timer will be
+* cancelled.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_release_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ unsigned long flags;
+
+ IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
+
+ if (resource_name < 0 ||
+ resource_name >= IPA_RM_RESOURCE_MAX) {
+ IPA_RM_ERR("%s: Invalid parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!ipa_rm_it_handles[resource_name].initied) {
+ IPA_RM_ERR("%s: Not initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+ ipa_rm_it_handles[resource_name].resource_requested = false;
+ if (ipa_rm_it_handles[resource_name].work_in_progress) {
+		IPA_RM_DBG_LOW("%s: timer already set, not scheduling again %d\n",
+ __func__, resource_name);
+ ipa_rm_it_handles[resource_name].reschedule_work = true;
+ spin_unlock_irqrestore(
+ &ipa_rm_it_handles[resource_name].lock, flags);
+ return 0;
+ }
+ ipa_rm_it_handles[resource_name].work_in_progress = true;
+ ipa_rm_it_handles[resource_name].reschedule_work = false;
+ IPA_RM_DBG_LOW("%s: setting delayed work\n", __func__);
+ queue_delayed_work(system_unbound_wq,
+ &ipa_rm_it_handles[resource_name].work,
+ ipa_rm_it_handles[resource_name].jiffies);
+ spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource);
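+
+/*
+ * Illustrative client flow (hypothetical resource choice and timeout):
+ *
+ *	ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_USB_PROD, 1000);
+ *	...
+ *	ipa_rm_inactivity_timer_request_resource(IPA_RM_RESOURCE_USB_PROD);
+ *	... traffic is handled ...
+ *	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_USB_PROD);
+ *	...
+ *	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_USB_PROD);
+ */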
+
diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
new file mode 100644
index 0000000..fe8e781
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
@@ -0,0 +1,280 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_i.h"
+
+/**
+ * ipa_rm_peers_list_get_resource_index() - resource name to index
+ * of this resource in corresponding peers list
+ * @resource_name: [in] resource name
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ * in case provided resource name isn't contained in enum
+ * ipa_rm_resource_name.
+ *
+ */
+static int ipa_rm_peers_list_get_resource_index(
+ enum ipa_rm_resource_name resource_name)
+{
+ int resource_index = IPA_RM_INDEX_INVALID;
+
+	if (IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		resource_index = ipa_rm_prod_index(resource_name);
+	} else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
+		resource_index = ipa_rm_cons_index(resource_name);
+		if (resource_index != IPA_RM_INDEX_INVALID)
+			resource_index =
+				resource_index - IPA_RM_RESOURCE_PROD_MAX;
+	}
+
+ return resource_index;
+}
+
+static bool ipa_rm_peers_list_check_index(int index,
+ struct ipa_rm_peers_list *peers_list)
+{
+	return !(index >= peers_list->max_peers || index < 0);
+}
+
+/**
+ * ipa_rm_peers_list_create() - creates the peers list
+ *
+ * @max_peers: maximum number of peers in new list
+ * @peers_list: [out] newly created peers list
+ *
+ * Returns: 0 in case of SUCCESS, negative otherwise
+ */
+int ipa_rm_peers_list_create(int max_peers,
+ struct ipa_rm_peers_list **peers_list)
+{
+ int result;
+
+ *peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC);
+ if (!*peers_list) {
+ IPA_RM_ERR("no mem\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+
+ (*peers_list)->max_peers = max_peers;
+	(*peers_list)->peers = kcalloc((*peers_list)->max_peers,
+			sizeof(*((*peers_list)->peers)), GFP_ATOMIC);
+ if (!((*peers_list)->peers)) {
+ IPA_RM_ERR("no mem\n");
+ result = -ENOMEM;
+ goto list_alloc_fail;
+ }
+
+ return 0;
+
+list_alloc_fail:
+ kfree(*peers_list);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_delete() - deletes the peers list
+ *
+ * @peers_list: peers list
+ *
+ */
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list)
+{
+ if (peers_list) {
+ kfree(peers_list->peers);
+ kfree(peers_list);
+ }
+}
+
+/**
+ * ipa_rm_peers_list_remove_peer() - removes peer from the list
+ *
+ * @peers_list: peers list
+ * @resource_name: name of the resource to remove
+ *
+ */
+void ipa_rm_peers_list_remove_peer(
+ struct ipa_rm_peers_list *peers_list,
+ enum ipa_rm_resource_name resource_name)
+{
+	int resource_index;
+
+	if (!peers_list)
+		return;
+
+	resource_index = ipa_rm_peers_list_get_resource_index(resource_name);
+	peers_list->peers[resource_index].resource = NULL;
+	peers_list->peers[resource_index].userspace_dep = false;
+	peers_list->peers_count--;
+}
+
+/**
+ * ipa_rm_peers_list_add_peer() - adds peer to the list
+ *
+ * @peers_list: peers list
+ * @resource: resource to add
+ *
+ */
+void ipa_rm_peers_list_add_peer(
+ struct ipa_rm_peers_list *peers_list,
+ struct ipa_rm_resource *resource,
+ bool userspace_dep)
+{
+	int resource_index;
+
+	if (!peers_list || !resource)
+		return;
+
+	resource_index = ipa_rm_peers_list_get_resource_index(resource->name);
+	peers_list->peers[resource_index].resource = resource;
+	peers_list->peers[resource_index].userspace_dep = userspace_dep;
+	peers_list->peers_count++;
+}
+
+/**
+ * ipa_rm_peers_list_is_empty() - checks
+ * if resource peers list is empty
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list is empty, false otherwise
+ */
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list)
+{
+ bool result = true;
+
+ if (!peers_list)
+ goto bail;
+
+ if (peers_list->peers_count > 0)
+ result = false;
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_has_last_peer() - checks
+ * if resource peers list has exactly one peer
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list has exactly one peer, false otherwise
+ */
+bool ipa_rm_peers_list_has_last_peer(
+ struct ipa_rm_peers_list *peers_list)
+{
+ bool result = false;
+
+ if (!peers_list)
+ goto bail;
+
+ if (peers_list->peers_count == 1)
+ result = true;
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_check_dependency() - check dependency
+ * between 2 peer lists
+ * @resource_peers: first peers list
+ * @resource_name: first peers list resource name
+ * @depends_on_peers: second peers list
+ * @depends_on_name: second peers list resource name
+ * @userspace_dep: [out] dependency was created by userspace
+ *
+ * Returns: true if there is dependency, false otherwise
+ *
+ */
+bool ipa_rm_peers_list_check_dependency(
+ struct ipa_rm_peers_list *resource_peers,
+ enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_peers_list *depends_on_peers,
+ enum ipa_rm_resource_name depends_on_name,
+ bool *userspace_dep)
+{
+ bool result = false;
+ int resource_index;
+
+ if (!resource_peers || !depends_on_peers || !userspace_dep)
+ return result;
+
+	resource_index = ipa_rm_peers_list_get_resource_index(depends_on_name);
+	if (resource_peers->peers[resource_index].resource != NULL) {
+		result = true;
+		*userspace_dep =
+			resource_peers->peers[resource_index].userspace_dep;
+	}
+
+	resource_index = ipa_rm_peers_list_get_resource_index(resource_name);
+	if (depends_on_peers->peers[resource_index].resource != NULL) {
+		result = true;
+		*userspace_dep =
+			depends_on_peers->peers[resource_index].userspace_dep;
+	}
+
+ return result;
+}
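+
+/*
+ * Note on the symmetric check above: a dependency such as
+ * USB_PROD->Q6_CONS is recorded in both peers lists (see
+ * ipa_rm_resource_add_dependency()), so finding the peer entry on
+ * either side means the dependency exists.
+ */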
+
+/**
+ * ipa_rm_peers_list_get_resource() - get resource by
+ * resource index
+ * @resource_index: resource index
+ * @resource_peers: peers list
+ *
+ * Returns: the resource if found, NULL otherwise
+ */
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+ struct ipa_rm_peers_list *resource_peers)
+{
+ struct ipa_rm_resource *result = NULL;
+
+ if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
+ goto bail;
+
+ result = resource_peers->peers[resource_index].resource;
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_userspace_dep() - returns whether resource dependency
+ * was added by userspace
+ * @resource_index: resource index
+ * @resource_peers: peers list
+ *
+ * Returns: true if the dependency was added by userspace, false if by kernel
+ */
+bool ipa_rm_peers_list_get_userspace_dep(int resource_index,
+ struct ipa_rm_peers_list *resource_peers)
+{
+ bool result = false;
+
+ if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
+ goto bail;
+
+ result = resource_peers->peers[resource_index].userspace_dep;
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_size() - get peers list size
+ *
+ * @peers_list: peers list
+ *
+ * Returns: the size of the peers list
+ */
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list)
+{
+ return peers_list->max_peers;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
new file mode 100644
index 0000000..cf1c157
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_PEERS_LIST_H_
+#define _IPA_RM_PEERS_LIST_H_
+
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_resource_peer {
+ struct ipa_rm_resource *resource;
+ bool userspace_dep;
+};
+
+/**
+ * struct ipa_rm_peers_list - IPA RM resource peers list
+ * @peers: the list of references to resources dependent on this resource
+ * in case of producer or list of dependencies in case of consumer
+ * @max_peers: maximum number of peers for this resource
+ * @peers_count: actual number of peers for this resource
+ */
+struct ipa_rm_peers_list {
+ struct ipa_rm_resource_peer *peers;
+ int max_peers;
+ int peers_count;
+};
+
+int ipa_rm_peers_list_create(int max_peers,
+ struct ipa_rm_peers_list **peers_list);
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list);
+void ipa_rm_peers_list_remove_peer(
+ struct ipa_rm_peers_list *peers_list,
+ enum ipa_rm_resource_name resource_name);
+void ipa_rm_peers_list_add_peer(
+ struct ipa_rm_peers_list *peers_list,
+ struct ipa_rm_resource *resource,
+ bool userspace_dep);
+bool ipa_rm_peers_list_check_dependency(
+ struct ipa_rm_peers_list *resource_peers,
+ enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_peers_list *depends_on_peers,
+ enum ipa_rm_resource_name depends_on_name,
+ bool *userspace_dep);
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+ struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_get_userspace_dep(int resource_index,
+ struct ipa_rm_peers_list *resource_peers);
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_has_last_peer(
+ struct ipa_rm_peers_list *peers_list);
+
+
+#endif /* _IPA_RM_PEERS_LIST_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
new file mode 100644
index 0000000..ec5eb3d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -0,0 +1,1207 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_resource.h"
+#include "ipa_rm_i.h"
+#include "ipa_common_i.h"
+
+/**
+ * ipa_rm_prod_index() - producer name to producer index mapping
+ * @resource_name: [in] resource name (should be of producer)
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ * in case provided resource name isn't contained
+ * in enum ipa_rm_resource_name or is not of producers.
+ *
+ */
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name)
+{
+ int result = resource_name;
+
+ switch (resource_name) {
+ case IPA_RM_RESOURCE_Q6_PROD:
+ case IPA_RM_RESOURCE_USB_PROD:
+ case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD:
+ case IPA_RM_RESOURCE_HSIC_PROD:
+ case IPA_RM_RESOURCE_STD_ECM_PROD:
+ case IPA_RM_RESOURCE_RNDIS_PROD:
+ case IPA_RM_RESOURCE_WWAN_0_PROD:
+ case IPA_RM_RESOURCE_WLAN_PROD:
+ case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
+ case IPA_RM_RESOURCE_MHI_PROD:
+ break;
+ default:
+ result = IPA_RM_INDEX_INVALID;
+ break;
+ }
+
+ return result;
+}
+
+/**
+ * ipa_rm_cons_index() - consumer name to consumer index mapping
+ * @resource_name: [in] resource name (should be of consumer)
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ * in case provided resource name isn't contained
+ * in enum ipa_rm_resource_name or is not of consumers.
+ *
+ */
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name)
+{
+ int result = resource_name;
+
+ switch (resource_name) {
+ case IPA_RM_RESOURCE_Q6_CONS:
+ case IPA_RM_RESOURCE_USB_CONS:
+ case IPA_RM_RESOURCE_HSIC_CONS:
+ case IPA_RM_RESOURCE_WLAN_CONS:
+ case IPA_RM_RESOURCE_APPS_CONS:
+ case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
+ case IPA_RM_RESOURCE_MHI_CONS:
+ case IPA_RM_RESOURCE_USB_DPL_CONS:
+ break;
+ default:
+ result = IPA_RM_INDEX_INVALID;
+ break;
+ }
+
+ return result;
+}
+
+int ipa_rm_resource_consumer_release_work(
+ struct ipa_rm_resource_cons *consumer,
+ enum ipa_rm_resource_state prev_state,
+ bool notify_completion)
+{
+ int driver_result;
+
+ IPA_RM_DBG_LOW("calling driver CB\n");
+ driver_result = consumer->release_resource();
+ IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result);
+ /*
+ * Treat IPA_RM_RELEASE_IN_PROGRESS as IPA_RM_RELEASED
+ * for CONS which remains in RELEASE_IN_PROGRESS.
+ */
+ if (driver_result == -EINPROGRESS)
+ driver_result = 0;
+	if (driver_result) {
+ IPA_RM_ERR("driver CB returned error %d\n", driver_result);
+ consumer->resource.state = prev_state;
+ goto bail;
+ }
+ if (driver_result == 0) {
+ if (notify_completion)
+ ipa_rm_resource_consumer_handle_cb(consumer,
+ IPA_RM_RESOURCE_RELEASED);
+ else
+ consumer->resource.state = IPA_RM_RELEASED;
+ }
+ complete_all(&consumer->request_consumer_in_progress);
+
+ ipa_rm_perf_profile_change(consumer->resource.name);
+bail:
+ return driver_result;
+}
+
+int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
+ enum ipa_rm_resource_state prev_state,
+ u32 prod_needed_bw,
+ bool notify_completion)
+{
+ int driver_result;
+
+ IPA_RM_DBG_LOW("calling driver CB\n");
+ driver_result = consumer->request_resource();
+ IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result);
+ if (driver_result == 0) {
+ if (notify_completion) {
+ ipa_rm_resource_consumer_handle_cb(consumer,
+ IPA_RM_RESOURCE_GRANTED);
+ } else {
+ consumer->resource.state = IPA_RM_GRANTED;
+ ipa_rm_perf_profile_change(consumer->resource.name);
+ ipa_resume_resource(consumer->resource.name);
+ }
+ } else if (driver_result != -EINPROGRESS) {
+ consumer->resource.state = prev_state;
+ consumer->resource.needed_bw -= prod_needed_bw;
+ consumer->usage_count--;
+ }
+
+ return driver_result;
+}
+
+int ipa_rm_resource_consumer_request(
+ struct ipa_rm_resource_cons *consumer,
+ u32 prod_needed_bw,
+ bool inc_usage_count,
+ bool wake_client)
+{
+ int result = 0;
+ enum ipa_rm_resource_state prev_state;
+ struct ipa_active_client_logging_info log_info;
+
+ IPA_RM_DBG_LOW("%s state: %d\n",
+ ipa_rm_resource_str(consumer->resource.name),
+ consumer->resource.state);
+
+ prev_state = consumer->resource.state;
+ consumer->resource.needed_bw += prod_needed_bw;
+ switch (consumer->resource.state) {
+ case IPA_RM_RELEASED:
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ reinit_completion(&consumer->request_consumer_in_progress);
+ consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+ ipa_rm_resource_str(consumer->resource.name));
+ if (prev_state == IPA_RM_RELEASE_IN_PROGRESS ||
+ ipa_inc_client_enable_clks_no_block(&log_info) != 0) {
+ IPA_RM_DBG_LOW("async resume work for %s\n",
+ ipa_rm_resource_str(consumer->resource.name));
+ ipa_rm_wq_send_resume_cmd(consumer->resource.name,
+ prev_state,
+ prod_needed_bw);
+ result = -EINPROGRESS;
+ break;
+ }
+ result = ipa_rm_resource_consumer_request_work(consumer,
+ prev_state,
+ prod_needed_bw,
+ false);
+ break;
+ case IPA_RM_GRANTED:
+ if (wake_client) {
+ result = ipa_rm_resource_consumer_request_work(
+ consumer, prev_state, prod_needed_bw, false);
+ break;
+ }
+ ipa_rm_perf_profile_change(consumer->resource.name);
+ break;
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ result = -EINPROGRESS;
+ break;
+ default:
+ consumer->resource.needed_bw -= prod_needed_bw;
+ result = -EPERM;
+ goto bail;
+ }
+ if (inc_usage_count)
+ consumer->usage_count++;
+bail:
+ IPA_RM_DBG_LOW("%s new state: %d\n",
+ ipa_rm_resource_str(consumer->resource.name),
+ consumer->resource.state);
+ IPA_RM_DBG_LOW("EXIT with %d\n", result);
+
+ return result;
+}
+
+int ipa_rm_resource_consumer_release(
+ struct ipa_rm_resource_cons *consumer,
+ u32 prod_needed_bw,
+ bool dec_usage_count)
+{
+ int result = 0;
+ enum ipa_rm_resource_state save_state;
+
+ IPA_RM_DBG_LOW("%s state: %d\n",
+ ipa_rm_resource_str(consumer->resource.name),
+ consumer->resource.state);
+ save_state = consumer->resource.state;
+ consumer->resource.needed_bw -= prod_needed_bw;
+ switch (consumer->resource.state) {
+ case IPA_RM_RELEASED:
+ break;
+ case IPA_RM_GRANTED:
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ if (dec_usage_count && consumer->usage_count > 0)
+ consumer->usage_count--;
+ if (consumer->usage_count == 0) {
+ consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+ if (save_state == IPA_RM_REQUEST_IN_PROGRESS ||
+ ipa_suspend_resource_no_block(
+ consumer->resource.name) != 0) {
+ ipa_rm_wq_send_suspend_cmd(
+ consumer->resource.name,
+ save_state,
+ prod_needed_bw);
+ result = -EINPROGRESS;
+ goto bail;
+ }
+ result = ipa_rm_resource_consumer_release_work(consumer,
+ save_state, false);
+ goto bail;
+ } else if (consumer->resource.state == IPA_RM_GRANTED) {
+ ipa_rm_perf_profile_change(consumer->resource.name);
+ }
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ if (dec_usage_count && consumer->usage_count > 0)
+ consumer->usage_count--;
+ result = -EINPROGRESS;
+ break;
+ default:
+ result = -EPERM;
+ goto bail;
+ }
+bail:
+ IPA_RM_DBG_LOW("%s new state: %d\n",
+ ipa_rm_resource_str(consumer->resource.name),
+ consumer->resource.state);
+ IPA_RM_DBG_LOW("EXIT with %d\n", result);
+
+ return result;
+}
+
+/**
+ * ipa_rm_resource_producer_notify_clients() - notify
+ * all registered clients of given producer
+ * @producer: producer
+ * @event: event to notify
+ * @notify_registered_only: notify only clients registered by
+ * ipa_rm_register()
+ */
+void ipa_rm_resource_producer_notify_clients(
+ struct ipa_rm_resource_prod *producer,
+ enum ipa_rm_event event,
+ bool notify_registered_only)
+{
+ struct ipa_rm_notification_info *reg_info;
+
+ IPA_RM_DBG_LOW("%s event: %d notify_registered_only: %d\n",
+ ipa_rm_resource_str(producer->resource.name),
+ event,
+ notify_registered_only);
+
+ list_for_each_entry(reg_info, &(producer->event_listeners), link) {
+ if (notify_registered_only && !reg_info->explicit)
+ continue;
+
+ IPA_RM_DBG_LOW("Notifying %s event: %d\n",
+ ipa_rm_resource_str(producer->resource.name), event);
+ reg_info->reg_params.notify_cb(reg_info->reg_params.user_data,
+ event,
+ 0);
+ IPA_RM_DBG_LOW("back from client CB\n");
+ }
+}
+
+static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource,
+ struct ipa_rm_resource_prod **producer,
+ struct ipa_rm_create_params *create_params,
+ int *max_peers)
+{
+ int result = 0;
+
+ *producer = kzalloc(sizeof(**producer), GFP_ATOMIC);
+ if (*producer == NULL) {
+ IPA_RM_ERR("no mem\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+
+ INIT_LIST_HEAD(&((*producer)->event_listeners));
+ result = ipa_rm_resource_producer_register(*producer,
+ &(create_params->reg_params),
+ false);
+ if (result) {
+ IPA_RM_ERR("ipa_rm_resource_producer_register() failed\n");
+ goto register_fail;
+ }
+
+ (*resource) = (struct ipa_rm_resource *) (*producer);
+ (*resource)->type = IPA_RM_PRODUCER;
+ *max_peers = IPA_RM_RESOURCE_CONS_MAX;
+ goto bail;
+register_fail:
+ kfree(*producer);
+bail:
+ return result;
+}
+
+static void ipa_rm_resource_producer_delete(
+ struct ipa_rm_resource_prod *producer)
+{
+ struct ipa_rm_notification_info *reg_info;
+ struct list_head *pos, *q;
+
+ ipa_rm_resource_producer_release(producer);
+ list_for_each_safe(pos, q, &(producer->event_listeners)) {
+ reg_info = list_entry(pos,
+ struct ipa_rm_notification_info,
+ link);
+ list_del(pos);
+ kfree(reg_info);
+ }
+}
+
+static int ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource,
+ struct ipa_rm_resource_cons **consumer,
+ struct ipa_rm_create_params *create_params,
+ int *max_peers)
+{
+ int result = 0;
+
+ *consumer = kzalloc(sizeof(**consumer), GFP_ATOMIC);
+ if (*consumer == NULL) {
+ IPA_RM_ERR("no mem\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+
+ (*consumer)->request_resource = create_params->request_resource;
+ (*consumer)->release_resource = create_params->release_resource;
+ (*resource) = (struct ipa_rm_resource *) (*consumer);
+ (*resource)->type = IPA_RM_CONSUMER;
+ init_completion(&((*consumer)->request_consumer_in_progress));
+ *max_peers = IPA_RM_RESOURCE_PROD_MAX;
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_resource_create() - creates resource
+ * @create_params: [in] parameters needed
+ * for resource initialization with IPA RM
+ * @resource: [out] created resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_create(
+ struct ipa_rm_create_params *create_params,
+ struct ipa_rm_resource **resource)
+{
+ struct ipa_rm_resource_cons *consumer;
+ struct ipa_rm_resource_prod *producer;
+ int max_peers;
+ int result = 0;
+
+ if (!create_params) {
+ result = -EINVAL;
+ goto bail;
+ }
+
+ if (IPA_RM_RESORCE_IS_PROD(create_params->name)) {
+ result = ipa_rm_resource_producer_create(resource,
+ &producer,
+ create_params,
+ &max_peers);
+ if (result) {
+ IPA_RM_ERR("ipa_rm_resource_producer_create failed\n");
+ goto bail;
+ }
+ } else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) {
+ result = ipa_rm_resource_consumer_create(resource,
+ &consumer,
+ create_params,
+ &max_peers);
+ if (result) {
+			IPA_RM_ERR("ipa_rm_resource_consumer_create failed\n");
+ goto bail;
+ }
+ } else {
+		IPA_RM_ERR("invalid resource\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ result = ipa_rm_peers_list_create(max_peers,
+ &((*resource)->peers_list));
+ if (result) {
+ IPA_RM_ERR("ipa_rm_peers_list_create failed\n");
+ goto peers_alloc_fail;
+ }
+ (*resource)->name = create_params->name;
+ (*resource)->floor_voltage = create_params->floor_voltage;
+ (*resource)->state = IPA_RM_RELEASED;
+ goto bail;
+
+peers_alloc_fail:
+ ipa_rm_resource_delete(*resource);
+bail:
+ return result;
+}
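+
+/*
+ * Illustrative creation sketch (hypothetical consumer client; the
+ * fields set here are the ones consumed by this function, and
+ * IPA_VOLTAGE_SVS stands in for whatever floor voltage the client
+ * actually needs):
+ *
+ *	struct ipa_rm_create_params params;
+ *	struct ipa_rm_resource *res;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.name = IPA_RM_RESOURCE_USB_CONS;
+ *	params.floor_voltage = IPA_VOLTAGE_SVS;
+ *	params.request_resource = client_request_cb;
+ *	params.release_resource = client_release_cb;
+ *	if (ipa_rm_resource_create(&params, &res))
+ *		IPA_RM_ERR("resource creation failed\n");
+ */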
+
+/**
+ * ipa_rm_resource_delete() - deletes resource
+ * @resource: [in] resource to delete
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_delete(struct ipa_rm_resource *resource)
+{
+ struct ipa_rm_resource *consumer;
+ struct ipa_rm_resource *producer;
+ int peers_index;
+ int result = 0;
+ int list_size;
+ bool userspace_dep;
+
+ if (!resource) {
+ IPA_RM_ERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ IPA_RM_DBG("ipa_rm_resource_delete ENTER with resource %d\n",
+ resource->name);
+ if (resource->type == IPA_RM_PRODUCER) {
+ if (resource->peers_list) {
+ list_size = ipa_rm_peers_list_get_size(
+ resource->peers_list);
+ for (peers_index = 0;
+ peers_index < list_size;
+ peers_index++) {
+ consumer = ipa_rm_peers_list_get_resource(
+ peers_index,
+ resource->peers_list);
+ if (consumer) {
+ userspace_dep =
+ ipa_rm_peers_list_get_userspace_dep(
+ peers_index,
+ resource->peers_list);
+ ipa_rm_resource_delete_dependency(
+ resource,
+ consumer,
+ userspace_dep);
+ }
+ }
+ }
+
+ ipa_rm_resource_producer_delete(
+ (struct ipa_rm_resource_prod *) resource);
+ } else if (resource->type == IPA_RM_CONSUMER) {
+ if (resource->peers_list) {
+ list_size = ipa_rm_peers_list_get_size(
+ resource->peers_list);
+ for (peers_index = 0;
+ peers_index < list_size;
+ peers_index++){
+ producer = ipa_rm_peers_list_get_resource(
+ peers_index,
+ resource->peers_list);
+ if (producer) {
+ userspace_dep =
+ ipa_rm_peers_list_get_userspace_dep(
+ peers_index,
+ resource->peers_list);
+ ipa_rm_resource_delete_dependency(
+ producer,
+ resource,
+ userspace_dep);
+ }
+ }
+ }
+ }
+ ipa_rm_peers_list_delete(resource->peers_list);
+ kfree(resource);
+ return result;
+}
+
+/**
+ * ipa_rm_resource_producer_register() - register a producer resource
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ * @explicit: [in] registered explicitly by ipa_rm_register()
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Producer resource is expected for this call.
+ *
+ */
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+ struct ipa_rm_register_params *reg_params,
+ bool explicit)
+{
+ int result = 0;
+ struct ipa_rm_notification_info *reg_info;
+ struct list_head *pos;
+
+ if (!producer || !reg_params) {
+ IPA_RM_ERR("invalid params\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ list_for_each(pos, &(producer->event_listeners)) {
+ reg_info = list_entry(pos,
+ struct ipa_rm_notification_info,
+ link);
+ if (reg_info->reg_params.notify_cb ==
+ reg_params->notify_cb) {
+ IPA_RM_ERR("already registered\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ }
+
+ reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+ if (reg_info == NULL) {
+ IPA_RM_ERR("no mem\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+
+ reg_info->reg_params.user_data = reg_params->user_data;
+ reg_info->reg_params.notify_cb = reg_params->notify_cb;
+ reg_info->explicit = explicit;
+	INIT_LIST_HEAD(&reg_info->link);
+	list_add(&reg_info->link, &producer->event_listeners);
+bail:
+ return result;
+}
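+
+/*
+ * Illustrative listener sketch (hypothetical client; the callback
+ * signature follows the notify_cb member of struct
+ * ipa_rm_register_params in linux/ipa.h):
+ *
+ *	static void client_rm_notify(void *user_data,
+ *			enum ipa_rm_event event, unsigned long data)
+ *	{
+ *		if (event == IPA_RM_RESOURCE_GRANTED)
+ *			client_resume_tx();
+ *	}
+ *
+ * client_resume_tx() is a hypothetical helper.
+ */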
+
+/**
+ * ipa_rm_resource_producer_deregister() - deregister a producer resource
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Producer resource is expected for this call.
+ * This function deletes only a single instance of the
+ * registration info.
+ *
+ */
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+ struct ipa_rm_register_params *reg_params)
+{
+ int result = -EINVAL;
+ struct ipa_rm_notification_info *reg_info;
+ struct list_head *pos, *q;
+
+ if (!producer || !reg_params) {
+ IPA_RM_ERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ list_for_each_safe(pos, q, &(producer->event_listeners)) {
+ reg_info = list_entry(pos,
+ struct ipa_rm_notification_info,
+ link);
+ if (reg_info->reg_params.notify_cb ==
+ reg_params->notify_cb) {
+ list_del(pos);
+ kfree(reg_info);
+ result = 0;
+ goto bail;
+ }
+ }
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_resource_add_dependency() - add dependency between two
+ * given resources
+ * @resource: [in] dependent resource (producer)
+ * @depends_on: [in] resource it depends on (consumer)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+ struct ipa_rm_resource *depends_on,
+ bool userspace_dep)
+{
+ int result = 0;
+ int consumer_result;
+ bool add_dep_by_userspace;
+
+ if (!resource || !depends_on) {
+ IPA_RM_ERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (ipa_rm_peers_list_check_dependency(resource->peers_list,
+ resource->name,
+ depends_on->peers_list,
+ depends_on->name,
+ &add_dep_by_userspace)) {
+ IPA_RM_ERR("dependency already exists, added by %s\n",
+ add_dep_by_userspace ? "userspace" : "kernel");
+ return -EEXIST;
+ }
+
+ ipa_rm_peers_list_add_peer(resource->peers_list, depends_on,
+ userspace_dep);
+ ipa_rm_peers_list_add_peer(depends_on->peers_list, resource,
+ userspace_dep);
+ IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name),
+ resource->state);
+
+ resource->needed_bw += depends_on->max_bw;
+ switch (resource->state) {
+ case IPA_RM_RELEASED:
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ break;
+ case IPA_RM_GRANTED:
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ {
+ enum ipa_rm_resource_state prev_state = resource->state;
+
+ resource->state = IPA_RM_REQUEST_IN_PROGRESS;
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_request++;
+ consumer_result = ipa_rm_resource_consumer_request(
+ (struct ipa_rm_resource_cons *)depends_on,
+ resource->max_bw,
+ true, false);
+ if (consumer_result != -EINPROGRESS) {
+ resource->state = prev_state;
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_request--;
+ ipa_rm_perf_profile_change(resource->name);
+ }
+ result = consumer_result;
+ break;
+ }
+ default:
+ IPA_RM_ERR("invalid state\n");
+ result = -EPERM;
+ goto bail;
+ }
+bail:
+ IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name),
+ resource->state);
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+
+/**
+ * ipa_rm_resource_delete_dependency() - delete a dependency between two
+ * given resources
+ * @resource: [in] dependent resource (producer)
+ * @depends_on: [in] resource it depends on (consumer)
+ *
+ * Returns: 0 on success, negative on failure
+ * In case the resource state was changed, a notification
+ * will be sent to the RM client
+ */
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+ struct ipa_rm_resource *depends_on,
+ bool userspace_dep)
+{
+ int result = 0;
+ bool state_changed = false;
+ bool release_consumer = false;
+ enum ipa_rm_event evt;
+ bool add_dep_by_userspace;
+
+ if (!resource || !depends_on) {
+ IPA_RM_ERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!ipa_rm_peers_list_check_dependency(resource->peers_list,
+ resource->name,
+ depends_on->peers_list,
+ depends_on->name,
+ &add_dep_by_userspace)) {
+ IPA_RM_ERR("dependency does not exist\n");
+ return -EINVAL;
+ }
+
+ /*
+ * to avoid race conditions between kernel and userspace
+ * need to check that the dependency was added by same entity
+ */
+ if (add_dep_by_userspace != userspace_dep) {
+ IPA_RM_DBG("dependency was added by %s\n",
+ add_dep_by_userspace ? "userspace" : "kernel");
+ IPA_RM_DBG("ignore request to delete dependency by %s\n",
+ userspace_dep ? "userspace" : "kernel");
+ return 0;
+ }
+
+ IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name),
+ resource->state);
+
+ resource->needed_bw -= depends_on->max_bw;
+ switch (resource->state) {
+ case IPA_RM_RELEASED:
+ break;
+ case IPA_RM_GRANTED:
+ ipa_rm_perf_profile_change(resource->name);
+ release_consumer = true;
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ if (((struct ipa_rm_resource_prod *)
+ resource)->pending_release > 0)
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_release--;
+ if (depends_on->state == IPA_RM_RELEASE_IN_PROGRESS &&
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_release == 0) {
+ resource->state = IPA_RM_RELEASED;
+ state_changed = true;
+ evt = IPA_RM_RESOURCE_RELEASED;
+ ipa_rm_perf_profile_change(resource->name);
+ }
+ break;
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ release_consumer = true;
+ if (((struct ipa_rm_resource_prod *)
+ resource)->pending_request > 0)
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_request--;
+ if (depends_on->state == IPA_RM_REQUEST_IN_PROGRESS &&
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_request == 0) {
+ resource->state = IPA_RM_GRANTED;
+ state_changed = true;
+ evt = IPA_RM_RESOURCE_GRANTED;
+ ipa_rm_perf_profile_change(resource->name);
+ }
+ break;
+ default:
+ result = -EINVAL;
+ goto bail;
+ }
+ if (state_changed) {
+ (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+ resource->name,
+ evt,
+ false);
+ }
+ IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name),
+ resource->state);
+ ipa_rm_peers_list_remove_peer(resource->peers_list,
+ depends_on->name);
+ ipa_rm_peers_list_remove_peer(depends_on->peers_list,
+ resource->name);
+ if (release_consumer)
+ (void) ipa_rm_resource_consumer_release(
+ (struct ipa_rm_resource_cons *)depends_on,
+ resource->max_bw,
+ true);
+bail:
+ IPA_RM_DBG("EXIT with %d\n", result);
+
+ return result;
+}
+
+/**
+ * ipa_rm_resource_producer_request() - producer resource request
+ * @producer: [in] producer
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer)
+{
+ int peers_index;
+ int result = 0;
+ struct ipa_rm_resource *consumer;
+ int consumer_result;
+ enum ipa_rm_resource_state state;
+
+ state = producer->resource.state;
+ switch (producer->resource.state) {
+ case IPA_RM_RELEASED:
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+ break;
+ case IPA_RM_GRANTED:
+ goto unlock_and_bail;
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ result = -EINPROGRESS;
+ goto unlock_and_bail;
+ default:
+ result = -EINVAL;
+ goto unlock_and_bail;
+ }
+
+ producer->pending_request = 0;
+ for (peers_index = 0;
+ peers_index < ipa_rm_peers_list_get_size(
+ producer->resource.peers_list);
+ peers_index++) {
+ consumer = ipa_rm_peers_list_get_resource(peers_index,
+ producer->resource.peers_list);
+ if (consumer) {
+ producer->pending_request++;
+ consumer_result = ipa_rm_resource_consumer_request(
+ (struct ipa_rm_resource_cons *)consumer,
+ producer->resource.max_bw,
+ true, false);
+ if (consumer_result == -EINPROGRESS) {
+ result = -EINPROGRESS;
+ } else {
+ producer->pending_request--;
+ if (consumer_result != 0) {
+ result = consumer_result;
+ goto bail;
+ }
+ }
+ }
+ }
+
+ if (producer->pending_request == 0) {
+ producer->resource.state = IPA_RM_GRANTED;
+ ipa_rm_perf_profile_change(producer->resource.name);
+ (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+ producer->resource.name,
+ IPA_RM_RESOURCE_GRANTED,
+ true);
+ result = 0;
+ }
+unlock_and_bail:
+ if (state != producer->resource.state)
+ IPA_RM_DBG_LOW("%s state changed %d->%d\n",
+ ipa_rm_resource_str(producer->resource.name),
+ state,
+ producer->resource.state);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_resource_producer_release() - producer resource release
+ * @producer: [in] producer resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer)
+{
+ int peers_index;
+ int result = 0;
+ struct ipa_rm_resource *consumer;
+ int consumer_result;
+ enum ipa_rm_resource_state state;
+
+ state = producer->resource.state;
+ switch (producer->resource.state) {
+ case IPA_RM_RELEASED:
+ goto bail;
+ case IPA_RM_GRANTED:
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ result = -EINPROGRESS;
+ goto bail;
+ default:
+ result = -EPERM;
+ goto bail;
+ }
+
+ producer->pending_release = 0;
+ for (peers_index = 0;
+ peers_index < ipa_rm_peers_list_get_size(
+ producer->resource.peers_list);
+ peers_index++) {
+ consumer = ipa_rm_peers_list_get_resource(peers_index,
+ producer->resource.peers_list);
+ if (consumer) {
+ producer->pending_release++;
+ consumer_result = ipa_rm_resource_consumer_release(
+ (struct ipa_rm_resource_cons *)consumer,
+ producer->resource.max_bw,
+ true);
+ producer->pending_release--;
+ }
+ }
+
+ if (producer->pending_release == 0) {
+ producer->resource.state = IPA_RM_RELEASED;
+ ipa_rm_perf_profile_change(producer->resource.name);
+ (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+ producer->resource.name,
+ IPA_RM_RESOURCE_RELEASED,
+ true);
+ }
+bail:
+ if (state != producer->resource.state)
+ IPA_RM_DBG_LOW("%s state changed %d->%d\n",
+ ipa_rm_resource_str(producer->resource.name),
+ state,
+ producer->resource.state);
+
+ return result;
+}
+
+static void ipa_rm_resource_producer_handle_cb(
+ struct ipa_rm_resource_prod *producer,
+ enum ipa_rm_event event)
+{
+ IPA_RM_DBG_LOW("%s state: %d event: %d pending_request: %d\n",
+ ipa_rm_resource_str(producer->resource.name),
+ producer->resource.state,
+ event,
+ producer->pending_request);
+
+ switch (producer->resource.state) {
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ if (event != IPA_RM_RESOURCE_GRANTED)
+ goto unlock_and_bail;
+ if (producer->pending_request > 0) {
+ producer->pending_request--;
+ if (producer->pending_request == 0) {
+ producer->resource.state =
+ IPA_RM_GRANTED;
+ ipa_rm_perf_profile_change(
+ producer->resource.name);
+ ipa_rm_resource_producer_notify_clients(
+ producer,
+ IPA_RM_RESOURCE_GRANTED,
+ false);
+ goto bail;
+ }
+ }
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ if (event != IPA_RM_RESOURCE_RELEASED)
+ goto unlock_and_bail;
+ if (producer->pending_release > 0) {
+ producer->pending_release--;
+ if (producer->pending_release == 0) {
+ producer->resource.state =
+ IPA_RM_RELEASED;
+ ipa_rm_perf_profile_change(
+ producer->resource.name);
+ ipa_rm_resource_producer_notify_clients(
+ producer,
+ IPA_RM_RESOURCE_RELEASED,
+ false);
+ goto bail;
+ }
+ }
+ break;
+ case IPA_RM_GRANTED:
+ case IPA_RM_RELEASED:
+ default:
+ goto unlock_and_bail;
+ }
+unlock_and_bail:
+ IPA_RM_DBG_LOW("%s new state: %d\n",
+ ipa_rm_resource_str(producer->resource.name),
+ producer->resource.state);
+bail:
+ return;
+}
+
+/**
+ * ipa_rm_resource_consumer_handle_cb() - propagates a resource
+ *	notification to all dependent producers
+ * @consumer: [in] notifying resource
+ * @event: [in] notification event
+ */
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+ enum ipa_rm_event event)
+{
+ int peers_index;
+ struct ipa_rm_resource *producer;
+
+ if (!consumer) {
+ IPA_RM_ERR("invalid params\n");
+ return;
+ }
+ IPA_RM_DBG_LOW("%s state: %d event: %d\n",
+ ipa_rm_resource_str(consumer->resource.name),
+ consumer->resource.state,
+ event);
+
+ switch (consumer->resource.state) {
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ if (event == IPA_RM_RESOURCE_RELEASED)
+ goto bail;
+ consumer->resource.state = IPA_RM_GRANTED;
+ ipa_rm_perf_profile_change(consumer->resource.name);
+ ipa_resume_resource(consumer->resource.name);
+ complete_all(&consumer->request_consumer_in_progress);
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ if (event == IPA_RM_RESOURCE_GRANTED)
+ goto bail;
+ consumer->resource.state = IPA_RM_RELEASED;
+ break;
+ case IPA_RM_GRANTED:
+ case IPA_RM_RELEASED:
+ default:
+ goto bail;
+ }
+
+ for (peers_index = 0;
+ peers_index < ipa_rm_peers_list_get_size(
+ consumer->resource.peers_list);
+ peers_index++) {
+ producer = ipa_rm_peers_list_get_resource(peers_index,
+ consumer->resource.peers_list);
+ if (producer)
+ ipa_rm_resource_producer_handle_cb(
+ (struct ipa_rm_resource_prod *)
+ producer,
+ event);
+ }
+
+ return;
+bail:
+ IPA_RM_DBG_LOW("%s new state: %d\n",
+ ipa_rm_resource_str(consumer->resource.name),
+ consumer->resource.state);
+}
+
+/*
+ * ipa_rm_resource_set_perf_profile() - sets the performance profile of
+ *				a resource.
+ *
+ * @resource: [in] resource
+ * @profile: [in] profile to be set
+ *
+ * Sets the profile on the given resource. If the resource is granted,
+ * its bandwidth vote is updated as well.
+ */
+int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource,
+ struct ipa_rm_perf_profile *profile)
+{
+ int peers_index;
+ struct ipa_rm_resource *peer;
+
+ if (!resource || !profile) {
+ IPA_RM_ERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (profile->max_supported_bandwidth_mbps == resource->max_bw) {
+ IPA_RM_DBG_LOW("same profile\n");
+ return 0;
+ }
+
+ if ((resource->type == IPA_RM_PRODUCER &&
+ (resource->state == IPA_RM_GRANTED ||
+ resource->state == IPA_RM_REQUEST_IN_PROGRESS)) ||
+ resource->type == IPA_RM_CONSUMER) {
+ for (peers_index = 0;
+ peers_index < ipa_rm_peers_list_get_size(
+ resource->peers_list);
+ peers_index++) {
+ peer = ipa_rm_peers_list_get_resource(peers_index,
+ resource->peers_list);
+ if (!peer)
+ continue;
+ peer->needed_bw -= resource->max_bw;
+ peer->needed_bw +=
+ profile->max_supported_bandwidth_mbps;
+ if (peer->state == IPA_RM_GRANTED)
+ ipa_rm_perf_profile_change(peer->name);
+ }
+ }
+
+ resource->max_bw = profile->max_supported_bandwidth_mbps;
+ if (resource->state == IPA_RM_GRANTED)
+ ipa_rm_perf_profile_change(resource->name);
+
+ return 0;
+}
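+
+/*
+ * Usage sketch for the function above (illustrative only; the caller
+ * context and the bandwidth value are made-up examples, not taken from
+ * this patch): a client raising its vote on an already granted resource
+ * fills a profile and passes it in.
+ *
+ *	struct ipa_rm_perf_profile profile = {
+ *		.max_supported_bandwidth_mbps = 800,
+ *	};
+ *
+ *	if (ipa_rm_resource_set_perf_profile(resource, &profile))
+ *		IPA_RM_ERR("failed to set perf profile\n");
+ */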
+
+
+/*
+ * ipa_rm_resource_producer_print_stat() - print the
+ * resource status and all of its dependencies
+ *
+ * @resource: [in] resource
+ * @buf: [in] the buffer to print into
+ * @size: [in] buffer size
+ *
+ * Returns: number of bytes used on success, negative on failure
+ */
+int ipa_rm_resource_producer_print_stat(
+ struct ipa_rm_resource *resource,
+ char *buf,
+	int size)
+{
+ int i;
+ int nbytes;
+ int cnt = 0;
+ struct ipa_rm_resource *consumer;
+
+ if (!buf || size < 0)
+ return -EINVAL;
+
+	/* print via "%s" so the name is never treated as a format string */
+	nbytes = scnprintf(buf + cnt, size - cnt, "%s",
+		ipa_rm_resource_str(resource->name));
+ cnt += nbytes;
+ nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ", resource->max_bw);
+ cnt += nbytes;
+
+ switch (resource->state) {
+ case IPA_RM_RELEASED:
+ nbytes = scnprintf(buf + cnt, size - cnt,
+ "Released] -> ");
+ cnt += nbytes;
+ break;
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ nbytes = scnprintf(buf + cnt, size - cnt,
+ "Request In Progress] -> ");
+ cnt += nbytes;
+ break;
+ case IPA_RM_GRANTED:
+ nbytes = scnprintf(buf + cnt, size - cnt,
+ "Granted] -> ");
+ cnt += nbytes;
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ nbytes = scnprintf(buf + cnt, size - cnt,
+ "Release In Progress] -> ");
+ cnt += nbytes;
+ break;
+ default:
+ return -EPERM;
+ }
+
+ for (i = 0; i < resource->peers_list->max_peers; ++i) {
+ consumer =
+ ipa_rm_peers_list_get_resource(
+ i,
+ resource->peers_list);
+ if (consumer) {
+			nbytes = scnprintf(buf + cnt, size - cnt, "%s",
+				ipa_rm_resource_str(consumer->name));
+ cnt += nbytes;
+ nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ",
+ consumer->max_bw);
+ cnt += nbytes;
+
+ switch (consumer->state) {
+ case IPA_RM_RELEASED:
+ nbytes = scnprintf(buf + cnt, size - cnt,
+ "Released], ");
+ cnt += nbytes;
+ break;
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ nbytes = scnprintf(buf + cnt, size - cnt,
+ "Request In Progress], ");
+ cnt += nbytes;
+ break;
+ case IPA_RM_GRANTED:
+ nbytes = scnprintf(buf + cnt, size - cnt,
+ "Granted], ");
+ cnt += nbytes;
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ nbytes = scnprintf(buf + cnt, size - cnt,
+ "Release In Progress], ");
+ cnt += nbytes;
+ break;
+ default:
+ return -EPERM;
+ }
+ }
+ }
+ nbytes = scnprintf(buf + cnt, size - cnt, "\n");
+ cnt += nbytes;
+
+ return cnt;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h
new file mode 100644
index 0000000..5c3a019
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_RESOURCE_H_
+#define _IPA_RM_RESOURCE_H_
+
+#include <linux/list.h>
+#include <linux/ipa.h>
+#include "ipa_rm_peers_list.h"
+
+/**
+ * enum ipa_rm_resource_state - resource state
+ */
+enum ipa_rm_resource_state {
+ IPA_RM_RELEASED,
+ IPA_RM_REQUEST_IN_PROGRESS,
+ IPA_RM_GRANTED,
+ IPA_RM_RELEASE_IN_PROGRESS
+};
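+
+/*
+ * The common state cycle, as driven by the producer request/release
+ * paths in ipa_rm_resource.c (a new request may also arrive while a
+ * release is still in progress, moving the resource straight back to
+ * IPA_RM_REQUEST_IN_PROGRESS):
+ *
+ *	IPA_RM_RELEASED            -> IPA_RM_REQUEST_IN_PROGRESS (request)
+ *	IPA_RM_REQUEST_IN_PROGRESS -> IPA_RM_GRANTED (all consumers granted)
+ *	IPA_RM_GRANTED             -> IPA_RM_RELEASE_IN_PROGRESS (release)
+ *	IPA_RM_RELEASE_IN_PROGRESS -> IPA_RM_RELEASED (all consumers released)
+ */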
+
+/**
+ * enum ipa_rm_resource_type - IPA resource manager resource type
+ */
+enum ipa_rm_resource_type {
+ IPA_RM_PRODUCER,
+ IPA_RM_CONSUMER
+};
+
+/**
+ * struct ipa_rm_notification_info - notification information
+ * of IPA RM client
+ * @reg_params: registration parameters
+ * @explicit: registered explicitly by ipa_rm_register()
+ * @link: link to the list of all registered clients information
+ */
+struct ipa_rm_notification_info {
+ struct ipa_rm_register_params reg_params;
+ bool explicit;
+ struct list_head link;
+};
+
+/**
+ * struct ipa_rm_resource - IPA RM resource
+ * @name: name identifying resource
+ * @type: type of resource (PRODUCER or CONSUMER)
+ * @floor_voltage: minimum voltage level for operation
+ * @max_bw: maximum bandwidth required for resource in Mbps
+ * @needed_bw: accumulated bandwidth in Mbps required by the peers
+ *		of this resource
+ * @state: state of the resource
+ * @peers_list: list of the peers of the resource
+ */
+struct ipa_rm_resource {
+ enum ipa_rm_resource_name name;
+ enum ipa_rm_resource_type type;
+ enum ipa_voltage_level floor_voltage;
+ u32 max_bw;
+ u32 needed_bw;
+ enum ipa_rm_resource_state state;
+ struct ipa_rm_peers_list *peers_list;
+};
+
+/**
+ * struct ipa_rm_resource_cons - IPA RM consumer
+ * @resource: resource
+ * @usage_count: number of producers in GRANTED / REQUESTED state
+ * using this consumer
+ * @request_consumer_in_progress: completion signalled once an
+ *		in-progress consumer request finishes
+ * @request_resource: function which should be called to request resource
+ * from resource manager
+ * @release_resource: function which should be called to release resource
+ * from resource manager
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_cons {
+ struct ipa_rm_resource resource;
+ int usage_count;
+ struct completion request_consumer_in_progress;
+ int (*request_resource)(void);
+ int (*release_resource)(void);
+};
+
+/**
+ * struct ipa_rm_resource_prod - IPA RM producer
+ * @resource: resource
+ * @event_listeners: list of clients registered with this producer
+ *		for notifications of resource state changes
+ * @pending_request: number of consumer requests still in progress
+ * @pending_release: number of consumer releases still in progress
+ *
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_prod {
+ struct ipa_rm_resource resource;
+ struct list_head event_listeners;
+ int pending_request;
+ int pending_release;
+};
+
+int ipa_rm_resource_create(
+ struct ipa_rm_create_params *create_params,
+ struct ipa_rm_resource **resource);
+
+int ipa_rm_resource_delete(struct ipa_rm_resource *resource);
+
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+ struct ipa_rm_register_params *reg_params,
+ bool explicit);
+
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+ struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+ struct ipa_rm_resource *depends_on,
+ bool userspace_dep);
+
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+ struct ipa_rm_resource *depends_on,
+ bool userspace_dep);
+
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer);
+
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer);
+
+int ipa_rm_resource_consumer_request(struct ipa_rm_resource_cons *consumer,
+ u32 needed_bw,
+ bool inc_usage_count,
+ bool wake_client);
+
+int ipa_rm_resource_consumer_release(struct ipa_rm_resource_cons *consumer,
+ u32 needed_bw,
+ bool dec_usage_count);
+
+int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource,
+ struct ipa_rm_perf_profile *profile);
+
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+ enum ipa_rm_event event);
+
+void ipa_rm_resource_producer_notify_clients(
+ struct ipa_rm_resource_prod *producer,
+ enum ipa_rm_event event,
+ bool notify_registered_only);
+
+int ipa_rm_resource_producer_print_stat(
+ struct ipa_rm_resource *resource,
+ char *buf,
+ int size);
+
+int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
+ enum ipa_rm_resource_state prev_state,
+ u32 needed_bw,
+ bool notify_completion);
+
+int ipa_rm_resource_consumer_release_work(
+ struct ipa_rm_resource_cons *consumer,
+ enum ipa_rm_resource_state prev_state,
+ bool notify_completion);
+
+#endif /* _IPA_RM_RESOURCE_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
new file mode 100644
index 0000000..ae6cfc4
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#ifndef _IPA_UC_OFFLOAD_COMMON_I_H_
+#define _IPA_UC_OFFLOAD_COMMON_I_H_
+
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/Makefile b/drivers/platform/msm/ipa/ipa_v2/Makefile
new file mode 100644
index 0000000..69b8a4c
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_IPA) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+ ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
+
+obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
new file mode 100644
index 0000000..037231c
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -0,0 +1,4812 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/qcom_iommu.h>
+#include <linux/time.h>
+#include <linux/hashtable.h>
+#include <linux/hash.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define CREATE_TRACE_POINTS
+#include "ipa_trace.h"
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_MOBILE_AP_MODE(x) ((x) == IPA_MODE_MOBILE_AP_ETH || \
+			       (x) == IPA_MODE_MOBILE_AP_WAN || \
+			       (x) == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_A5_MUX_HEADER_LENGTH (8)
+#define IPA_ROUTING_RULE_BYTE_SIZE (4)
+#define IPA_BAM_CNFG_BITS_VALv1_1 (0x7FFFE004)
+#define IPA_BAM_CNFG_BITS_VALv2_0 (0xFFFFE004)
+#define IPA_STATUS_CLEAR_OFST (0x3f28)
+#define IPA_STATUS_CLEAR_SIZE (32)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define CLEANUP_TAG_PROCESS_TIMEOUT 150
+
+#define IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
+#define MAX_POLLING_ITERATION 40
+#define MIN_POLLING_ITERATION 1
+#define ONE_MSEC 1
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+ (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
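+
+/*
+ * Worked example for the macro above (the string is a made-up sample):
+ * for "RNDIS", IPA_AGGR_STR_IN_BYTES() yields strnlen("RNDIS", 9) + 1 =
+ * 6, i.e. the string plus its NUL terminator, capped at
+ * IPA_AGGR_MAX_STR_LENGTH.
+ */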
+
+#define IPA_SPS_PROD_TIMEOUT_MSEC 100
+
+#ifdef CONFIG_COMPAT
+#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_RT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_FLT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_FLT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_RT_TBL, \
+ compat_uptr_t)
+#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COPY_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+ compat_uptr_t)
+#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_MEM, \
+ compat_uptr_t)
+#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_INIT_NAT, \
+ compat_uptr_t)
+#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NAT_DMA, \
+ compat_uptr_t)
+#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_DEL_NAT, \
+ compat_uptr_t)
+#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_NAT_OFFSET, \
+ compat_uptr_t)
+#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PULL_MSG, \
+ compat_uptr_t)
+#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RM_ADD_DEPENDENCY, \
+ compat_uptr_t)
+#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RM_DEL_DEPENDENCY, \
+ compat_uptr_t)
+#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GENERATE_FLT_EQ, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+ compat_uptr_t)
+#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_WRITE_QMAPID, \
+ compat_uptr_t)
+#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_MDFY_FLT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+ compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+ compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+ compat_uptr_t)
+#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR_PROC_CTX, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR_PROC_CTX, \
+ compat_uptr_t)
+#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_MDFY_RT_RULE, \
+ compat_uptr_t)
+
+/**
+ * struct ipa_ioc_nat_alloc_mem32 - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of the table
+ * @size: input parameter, size of the table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem32 {
+ char dev_name[IPA_RESOURCE_NAME_MAX];
+ compat_size_t size;
+ compat_off_t offset;
+};
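+
+/*
+ * A compat_ioctl handler converts this 32-bit layout to the native
+ * struct before reusing the 64-bit path. Field-by-field sketch only
+ * (not the driver's actual compat implementation):
+ *
+ *	struct ipa_ioc_nat_alloc_mem32 nat_mem32;
+ *	struct ipa_ioc_nat_alloc_mem nat_mem;
+ *
+ *	if (copy_from_user(&nat_mem32, (const void __user *)arg,
+ *			sizeof(nat_mem32)))
+ *		return -EFAULT;
+ *	memcpy(nat_mem.dev_name, nat_mem32.dev_name,
+ *		IPA_RESOURCE_NAME_MAX);
+ *	nat_mem.size = (size_t)nat_mem32.size;
+ *	nat_mem.offset = (off_t)nat_mem32.offset;
+ */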
+#endif
+
+static void ipa_start_tag_process(struct work_struct *work);
+static DECLARE_WORK(ipa_tag_work, ipa_start_tag_process);
+
+static void ipa_sps_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_sps_release_resource_work,
+ ipa_sps_release_resource);
+
+static struct ipa_plat_drv_res ipa_res = {0, };
+
+struct msm_bus_scale_pdata *bus_scale_table;
+
+static struct clk *ipa_clk_src;
+static struct clk *ipa_clk;
+static struct clk *smmu_clk;
+static struct clk *sys_noc_ipa_axi_clk;
+static struct clk *ipa_cnoc_clk;
+static struct clk *ipa_inactivity_clk;
+
+struct ipa_context *ipa_ctx;
+static struct device *master_dev;
+struct platform_device *ipa_pdev;
+static struct {
+ bool present;
+ bool arm_smmu;
+ bool disable_htw;
+ bool fast_map;
+ bool s1_bypass;
+ u32 ipa_base;
+ u32 ipa_size;
+} smmu_info;
+
+static char *active_clients_table_buf;
+
+int ipa2_active_clients_log_print_buffer(char *buf, int size)
+{
+ int i;
+ int nbytes;
+ int cnt = 0;
+ int start_idx;
+ int end_idx;
+
+ start_idx = (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) %
+ IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+ end_idx = ipa_ctx->ipa2_active_clients_logging.log_head;
+ for (i = start_idx; i != end_idx;
+ i = (i + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
+ nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
+ ipa_ctx->ipa2_active_clients_logging
+ .log_buffer[i]);
+ cnt += nbytes;
+ }
+
+ return cnt;
+}
+
+int ipa2_active_clients_log_print_table(char *buf, int size)
+{
+ int i;
+ struct ipa2_active_client_htable_entry *iterator;
+ int cnt = 0;
+
+ cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
+ hash_for_each(ipa_ctx->ipa2_active_clients_logging.htable, i,
+ iterator, list) {
+ switch (iterator->type) {
+ case IPA2_ACTIVE_CLIENT_LOG_TYPE_EP:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d ENDPOINT\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d SIMPLE\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d RESOURCE\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d SPECIAL\n",
+ iterator->id_string, iterator->count);
+ break;
+ default:
+			IPAERR("Trying to print illegal active_clients type\n");
+ break;
+ }
+ }
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "\nTotal active clients count: %d\n",
+ ipa_ctx->ipa_active_clients.cnt);
+
+ return cnt;
+}
+
+static int ipa2_active_clients_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ ipa_active_clients_lock();
+ ipa2_active_clients_log_print_table(active_clients_table_buf,
+ IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
+ IPAERR("%s", active_clients_table_buf);
+ ipa_active_clients_unlock();
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa2_active_clients_panic_blk = {
+ .notifier_call = ipa2_active_clients_panic_notifier,
+};
+
+static int ipa2_active_clients_log_insert(const char *string)
+{
+ int head;
+ int tail;
+
+ head = ipa_ctx->ipa2_active_clients_logging.log_head;
+ tail = ipa_ctx->ipa2_active_clients_logging.log_tail;
+
+ if (!ipa_ctx->ipa2_active_clients_logging.log_rdy)
+ return -EPERM;
+ memset(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], '_',
+ IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
+ strlcpy(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], string,
+ (size_t)IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
+ head = (head + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+ if (tail == head)
+ tail = (tail + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+
+ ipa_ctx->ipa2_active_clients_logging.log_tail = tail;
+ ipa_ctx->ipa2_active_clients_logging.log_head = head;
+
+ return 0;
+}
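+
+/*
+ * The log above is a simple circular buffer: head is the next slot to
+ * be written and tail trails it so that slots (tail + 1) .. (head - 1),
+ * modulo IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES, hold the valid
+ * lines. When head catches up with tail, tail is advanced and the
+ * oldest line is discarded.
+ */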
+
+static int ipa2_active_clients_log_init(void)
+{
+ int i;
+
+ ipa_ctx->ipa2_active_clients_logging.log_buffer[0] = kzalloc(
+ IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
+ sizeof(char[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]),
+ GFP_KERNEL);
+ active_clients_table_buf = kzalloc(sizeof(
+ char[IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
+	if (ipa_ctx->ipa2_active_clients_logging.log_buffer[0] == NULL) {
+		IPAERR("Active Clients Logging memory allocation failed\n");
+ goto bail;
+ }
+ for (i = 0; i < IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
+ ipa_ctx->ipa2_active_clients_logging.log_buffer[i] =
+ ipa_ctx->ipa2_active_clients_logging.log_buffer[0] +
+ (IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
+ }
+ ipa_ctx->ipa2_active_clients_logging.log_head = 0;
+ ipa_ctx->ipa2_active_clients_logging.log_tail =
+ IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+ hash_init(ipa_ctx->ipa2_active_clients_logging.htable);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ipa2_active_clients_panic_blk);
+ ipa_ctx->ipa2_active_clients_logging.log_rdy = 1;
+
+ return 0;
+
+bail:
+ return -ENOMEM;
+}
+
+void ipa2_active_clients_log_clear(void)
+{
+ ipa_active_clients_lock();
+ ipa_ctx->ipa2_active_clients_logging.log_head = 0;
+ ipa_ctx->ipa2_active_clients_logging.log_tail =
+ IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+ ipa_active_clients_unlock();
+}
+
+static void ipa2_active_clients_log_destroy(void)
+{
+ ipa_ctx->ipa2_active_clients_logging.log_rdy = 0;
+ kfree(ipa_ctx->ipa2_active_clients_logging.log_buffer[0]);
+ ipa_ctx->ipa2_active_clients_logging.log_head = 0;
+ ipa_ctx->ipa2_active_clients_logging.log_tail =
+ IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+}
+
+enum ipa_smmu_cb_type {
+ IPA_SMMU_CB_AP,
+ IPA_SMMU_CB_WLAN,
+ IPA_SMMU_CB_UC,
+ IPA_SMMU_CB_MAX
+
+};
+
+static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
+
+struct iommu_domain *ipa2_get_smmu_domain(void)
+{
+ if (smmu_cb[IPA_SMMU_CB_AP].valid)
+ return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
+
+ IPAERR("CB not valid\n");
+
+ return NULL;
+}
+
+struct iommu_domain *ipa2_get_uc_smmu_domain(void)
+{
+ if (smmu_cb[IPA_SMMU_CB_UC].valid)
+ return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
+
+ IPAERR("CB not valid\n");
+
+ return NULL;
+}
+
+struct iommu_domain *ipa2_get_wlan_smmu_domain(void)
+{
+ if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
+ return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
+
+ IPAERR("CB not valid\n");
+
+ return NULL;
+}
+
+struct device *ipa2_get_dma_dev(void)
+{
+ return ipa_ctx->pdev;
+}
+
+/**
+ * ipa2_get_smmu_ctx() - Return the smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void)
+{
+ return &smmu_cb[IPA_SMMU_CB_AP];
+}
+
+
+/**
+ * ipa2_get_wlan_smmu_ctx() - Return the wlan smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void)
+{
+ return &smmu_cb[IPA_SMMU_CB_WLAN];
+}
+
+/**
+ * ipa2_get_uc_smmu_ctx() - Return the uc smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void)
+{
+ return &smmu_cb[IPA_SMMU_CB_UC];
+}
+
+static int ipa_open(struct inode *inode, struct file *filp)
+{
+ struct ipa_context *ctx = NULL;
+
+ IPADBG("ENTER\n");
+ ctx = container_of(inode->i_cdev, struct ipa_context, cdev);
+ filp->private_data = ctx;
+
+ return 0;
+}
+
+/**
+ * ipa_flow_control() - Enable/Disable flow control on a particular client.
+ * @ipa_client: client on which flow control is enabled/disabled
+ * @enable: true to enable the flow, false to disable it
+ * @qmap_id: QMAP ID of the flow; must match the endpoint's configured ID
+ *
+ * Return codes:
+ * None
+ */
+void ipa_flow_control(enum ipa_client_type ipa_client,
+ bool enable, uint32_t qmap_id)
+{
+ struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+ int ep_idx;
+ struct ipa_ep_context *ep;
+
+	/* Check if tethered flow control is needed or not. */
+ if (!ipa_ctx->tethered_flow_control) {
+ IPADBG("Apps flow control is not needed\n");
+ return;
+ }
+
+ /* Check if ep is valid. */
+ ep_idx = ipa2_get_ep_mapping(ipa_client);
+ if (ep_idx == -1) {
+ IPADBG("Invalid IPA client\n");
+ return;
+ }
+
+ ep = &ipa_ctx->ep[ep_idx];
+ if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
+ IPADBG("EP not valid/Not applicable for client.\n");
+ return;
+ }
+
+ spin_lock(&ipa_ctx->disconnect_lock);
+ /* Check if the QMAP_ID matches. */
+ if (ep->cfg.meta.qmap_id != qmap_id) {
+ IPADBG("Flow control ind not for same flow: %u %u\n",
+ ep->cfg.meta.qmap_id, qmap_id);
+ spin_unlock(&ipa_ctx->disconnect_lock);
+ return;
+ }
+ if (!ep->disconnect_in_progress) {
+ if (enable) {
+ IPADBG("Enabling Flow\n");
+ ep_ctrl.ipa_ep_delay = false;
+ IPA_STATS_INC_CNT(ipa_ctx->stats.flow_enable);
+ } else {
+ IPADBG("Disabling Flow\n");
+ ep_ctrl.ipa_ep_delay = true;
+ IPA_STATS_INC_CNT(ipa_ctx->stats.flow_disable);
+ }
+ ep_ctrl.ipa_ep_suspend = false;
+ ipa2_cfg_ep_ctrl(ep_idx, &ep_ctrl);
+ } else {
+ IPADBG("EP disconnect is in progress\n");
+ }
+ spin_unlock(&ipa_ctx->disconnect_lock);
+}
+
+static void ipa_wan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+ if (!buff) {
+ IPAERR("Null buffer\n");
+ return;
+ }
+
+ if (type != WAN_UPSTREAM_ROUTE_ADD &&
+ type != WAN_UPSTREAM_ROUTE_DEL &&
+ type != WAN_EMBMS_CONNECT) {
+ IPAERR("Wrong type given. buff %p type %d\n", buff, type);
+ return;
+ }
+
+ kfree(buff);
+}
+
+static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
+{
+ int retval;
+ struct ipa_wan_msg *wan_msg;
+ struct ipa_msg_meta msg_meta;
+
+ wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
+ if (!wan_msg) {
+ IPAERR("no memory\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
+ sizeof(struct ipa_wan_msg))) {
+ kfree(wan_msg);
+ return -EFAULT;
+ }
+
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = msg_type;
+ msg_meta.msg_len = sizeof(struct ipa_wan_msg);
+ retval = ipa2_send_msg(&msg_meta, wan_msg, ipa_wan_msg_free_cb);
+ if (retval) {
+ IPAERR("ipa2_send_msg failed: %d\n", retval);
+ kfree(wan_msg);
+ return retval;
+ }
+
+ return 0;
+}
+
+
+static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ u32 pyld_sz;
+ u8 header[128] = { 0 };
+ u8 *param = NULL;
+ struct ipa_ioc_nat_alloc_mem nat_mem;
+ struct ipa_ioc_v4_nat_init nat_init;
+ struct ipa_ioc_v4_nat_del nat_del;
+ struct ipa_ioc_rm_dependency rm_depend;
+ size_t sz;
+
+ IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+ if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+ return -ENOTTY;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ switch (cmd) {
+ case IPA_IOC_ALLOC_NAT_MEM:
+ if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ retval = -EFAULT;
+ break;
+ }
+ /* null terminate the string */
+ nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+ if (ipa2_allocate_nat_device(&nat_mem)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_V4_INIT_NAT:
+ if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+ sizeof(struct ipa_ioc_v4_nat_init))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_nat_init_cmd(&nat_init)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_NAT_DMA:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_nat_dma_cmd))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz =
+ sizeof(struct ipa_ioc_nat_dma_cmd) +
+ ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
+ sizeof(struct ipa_ioc_nat_dma_one);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (ipa2_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_V4_DEL_NAT:
+ if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+ sizeof(struct ipa_ioc_v4_nat_del))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_nat_del_cmd(&nat_del)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_hdr) +
+ ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
+ sizeof(struct ipa_hdr_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_hdr) +
+ ((struct ipa_ioc_del_hdr *)header)->num_hdls *
+ sizeof(struct ipa_hdr_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_rt_rule) +
+ ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
+ sizeof(struct ipa_rt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_MDFY_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_mdfy_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_mdfy_rt_rule) +
+ ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules *
+ sizeof(struct ipa_rt_rule_mdfy);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_rt_rule) +
+ ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
+ sizeof(struct ipa_rt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_flt_rule) +
+ ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
+ sizeof(struct ipa_flt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_flt_rule) +
+ ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
+ sizeof(struct ipa_flt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_MDFY_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_mdfy_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_mdfy_flt_rule) +
+ ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules *
+ sizeof(struct ipa_flt_rule_mdfy);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_COMMIT_HDR:
+ retval = ipa2_commit_hdr();
+ break;
+ case IPA_IOC_RESET_HDR:
+ retval = ipa2_reset_hdr();
+ break;
+ case IPA_IOC_COMMIT_RT:
+ retval = ipa2_commit_rt(arg);
+ break;
+ case IPA_IOC_RESET_RT:
+ retval = ipa2_reset_rt(arg);
+ break;
+ case IPA_IOC_COMMIT_FLT:
+ retval = ipa2_commit_flt(arg);
+ break;
+ case IPA_IOC_RESET_FLT:
+ retval = ipa2_reset_flt(arg);
+ break;
+ case IPA_IOC_GET_RT_TBL:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_rt_tbl))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_rt_tbl))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PUT_RT_TBL:
+ retval = ipa2_put_rt_tbl(arg);
+ break;
+ case IPA_IOC_GET_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PUT_HDR:
+ retval = ipa2_put_hdr(arg);
+ break;
+ case IPA_IOC_SET_FLT:
+ retval = ipa_cfg_filter(arg);
+ break;
+ case IPA_IOC_COPY_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_copy_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_copy_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_QUERY_INTF:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_query_intf))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_query_intf((struct ipa_ioc_query_intf *)header)) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_query_intf))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_QUERY_INTF_TX_PROPS:
+ sz = sizeof(struct ipa_ioc_query_intf_tx_props);
+ if (copy_from_user(header, (u8 *)arg, sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
+ > IPA_NUM_PROPS_MAX) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
+ header)->num_tx_props *
+ sizeof(struct ipa_ioc_tx_intf_prop);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_query_intf_tx_props(
+ (struct ipa_ioc_query_intf_tx_props *)param)) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_QUERY_INTF_RX_PROPS:
+ sz = sizeof(struct ipa_ioc_query_intf_rx_props);
+ if (copy_from_user(header, (u8 *)arg, sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
+ > IPA_NUM_PROPS_MAX) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
+ header)->num_rx_props *
+ sizeof(struct ipa_ioc_rx_intf_prop);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_query_intf_rx_props(
+ (struct ipa_ioc_query_intf_rx_props *)param)) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_QUERY_INTF_EXT_PROPS:
+ sz = sizeof(struct ipa_ioc_query_intf_ext_props);
+ if (copy_from_user(header, (u8 *)arg, sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (((struct ipa_ioc_query_intf_ext_props *)
+ header)->num_ext_props > IPA_NUM_PROPS_MAX) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz = sz + ((struct ipa_ioc_query_intf_ext_props *)
+ header)->num_ext_props *
+ sizeof(struct ipa_ioc_ext_intf_prop);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_query_intf_ext_props(
+ (struct ipa_ioc_query_intf_ext_props *)param)) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PULL_MSG:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_msg_meta))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz = sizeof(struct ipa_msg_meta) +
+ ((struct ipa_msg_meta *)header)->msg_len;
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_pull_msg((struct ipa_msg_meta *)param,
+ (char *)param + sizeof(struct ipa_msg_meta),
+ ((struct ipa_msg_meta *)param)->msg_len) !=
+ ((struct ipa_msg_meta *)param)->msg_len) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_RM_ADD_DEPENDENCY:
+ if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+ sizeof(struct ipa_ioc_rm_dependency))) {
+ retval = -EFAULT;
+ break;
+ }
+ retval = ipa_rm_add_dependency_from_ioctl(
+ rm_depend.resource_name, rm_depend.depends_on_name);
+ break;
+ case IPA_IOC_RM_DEL_DEPENDENCY:
+ if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+ sizeof(struct ipa_ioc_rm_dependency))) {
+ retval = -EFAULT;
+ break;
+ }
+ retval = ipa_rm_delete_dependency_from_ioctl(
+ rm_depend.resource_name, rm_depend.depends_on_name);
+ break;
+ case IPA_IOC_GENERATE_FLT_EQ:
+ {
+ struct ipa_ioc_generate_flt_eq flt_eq;
+
+ if (copy_from_user(&flt_eq, (u8 *)arg,
+ sizeof(struct ipa_ioc_generate_flt_eq))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_generate_flt_eq(flt_eq.ip, &flt_eq.attrib,
+ &flt_eq.eq_attrib)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, &flt_eq,
+ sizeof(struct ipa_ioc_generate_flt_eq))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ }
+ case IPA_IOC_QUERY_EP_MAPPING:
+ {
+ retval = ipa2_get_ep_mapping(arg);
+ break;
+ }
+ case IPA_IOC_QUERY_RT_TBL_INDEX:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_query_rt_index(
+ (struct ipa_ioc_get_rt_tbl_indx *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_WRITE_QMAPID:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_write_qmapid))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_write_qmapid))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
+ retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD);
+ if (retval) {
+ IPAERR("ipa_send_wan_msg failed: %d\n", retval);
+ break;
+ }
+ break;
+ case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
+ retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL);
+ if (retval) {
+ IPAERR("ipa_send_wan_msg failed: %d\n", retval);
+ break;
+ }
+ break;
+ case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
+ retval = ipa_send_wan_msg(arg, WAN_EMBMS_CONNECT);
+ if (retval) {
+ IPAERR("ipa_send_wan_msg failed: %d\n", retval);
+ break;
+ }
+ break;
+ case IPA_IOC_ADD_HDR_PROC_CTX:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
+ ((struct ipa_ioc_add_hdr_proc_ctx *)header)->num_proc_ctxs *
+ sizeof(struct ipa_hdr_proc_ctx_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_add_hdr_proc_ctx(
+ (struct ipa_ioc_add_hdr_proc_ctx *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_DEL_HDR_PROC_CTX:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
+ ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls *
+ sizeof(struct ipa_hdr_proc_ctx_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa2_del_hdr_proc_ctx(
+ (struct ipa_ioc_del_hdr_proc_ctx *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_GET_HW_VERSION:
+ pyld_sz = sizeof(enum ipa_hw_type);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ memcpy(param, &ipa_ctx->ipa_hw_type, pyld_sz);
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ default: /* redundant, as cmd was checked against MAXNR */
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return -ENOTTY;
+ }
+ kfree(param);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return retval;
+}
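+
+/*
+ * Userspace reaches the handler above through the IPA character device.
+ * Illustrative sketch only: the device node path and the table name are
+ * assumptions, not taken from this patch.
+ *
+ *	struct ipa_ioc_nat_alloc_mem nat_mem;
+ *	int fd = open("/dev/ipa", O_RDWR);
+ *
+ *	if (fd < 0)
+ *		return -1;
+ *	memset(&nat_mem, 0, sizeof(nat_mem));
+ *	strncpy(nat_mem.dev_name, "ipaNatTable",
+ *		IPA_RESOURCE_NAME_MAX - 1);
+ *	nat_mem.size = 4096;
+ *	if (ioctl(fd, IPA_IOC_ALLOC_NAT_MEM, &nat_mem))
+ *		return -1;
+ */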
+
+/**
+ * ipa_setup_dflt_rt_tables() - Setup default routing tables
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
+int ipa_setup_dflt_rt_tables(void)
+{
+ struct ipa_ioc_add_rt_rule *rt_rule;
+ struct ipa_rt_rule_add *rt_rule_entry;
+
+ rt_rule =
+ kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+ sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+ if (!rt_rule) {
+ IPAERR("fail to alloc mem\n");
+ return -ENOMEM;
+ }
+ /* setup a default v4 route to point to Apps */
+ rt_rule->num_rules = 1;
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v4;
+ strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+ IPA_RESOURCE_NAME_MAX);
+
+ rt_rule_entry = &rt_rule->rules[0];
+ rt_rule_entry->at_rear = 1;
+ rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
+ rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl;
+
+ if (ipa2_add_rt_rule(rt_rule)) {
+ IPAERR("fail to add dflt v4 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+ /* setup a default v6 route to point to A5 */
+ rt_rule->ip = IPA_IP_v6;
+ if (ipa2_add_rt_rule(rt_rule)) {
+ IPAERR("fail to add dflt v6 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+ /*
+	 * Because these tables are the very first to be added, they will
+	 * both have the same index (0), which is essential for programming
+	 * the "route" end-point config.
+ */
+
+ kfree(rt_rule);
+
+ return 0;
+}
+
+static int ipa_setup_exception_path(void)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_hdr_add *hdr_entry;
+ struct ipa_route route = { 0 };
+ int ret;
+
+ /* install the basic exception header */
+ hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+ sizeof(struct ipa_hdr_add), GFP_KERNEL);
+ if (!hdr) {
+ IPAERR("fail to alloc exception hdr\n");
+ return -ENOMEM;
+ }
+ hdr->num_hdrs = 1;
+ hdr->commit = 1;
+ hdr_entry = &hdr->hdr[0];
+
+ if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
+ strlcpy(hdr_entry->name, IPA_A5_MUX_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ /* set template for the A5_MUX hdr in header addition block */
+ hdr_entry->hdr_len = IPA_A5_MUX_HEADER_LENGTH;
+ } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
+ strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+ } else {
+ WARN_ON(1);
+ }
+
+ if (ipa2_add_hdr(hdr)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ if (hdr_entry->status) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+ /* set the route register to pass exception packets to Apps */
+ route.route_def_pipe = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ route.route_frag_def_pipe = ipa2_get_ep_mapping(
+ IPA_CLIENT_APPS_LAN_CONS);
+ route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl;
+
+ if (ipa_cfg_route(&route)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ kfree(hdr);
+ return ret;
+}
+
+static int ipa_init_smem_region(int memory_region_size,
+ int memory_region_offset)
+{
+ struct ipa_hw_imm_cmd_dma_shared_mem cmd;
+ struct ipa_desc desc;
+ struct ipa_mem_buffer mem;
+ int rc;
+
+ if (memory_region_size == 0)
+ return 0;
+
+ memset(&desc, 0, sizeof(desc));
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&mem, 0, sizeof(mem));
+
+ mem.size = memory_region_size;
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size,
+ &mem.phys_base, GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+
+ memset(mem.base, 0, mem.size);
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa_ctx->smem_restricted_bytes +
+ memory_region_offset;
+ desc.opcode = IPA_DMA_SHARED_MEM;
+ desc.pyld = &cmd;
+ desc.len = sizeof(cmd);
+ desc.type = IPA_IMM_CMD_DESC;
+
+ rc = ipa_send_cmd(1, &desc);
+ if (rc) {
+ IPAERR("failed to send immediate command (error %d)\n", rc);
+ rc = -EFAULT;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
+ mem.phys_base);
+
+ return rc;
+}
+
+/**
+ * ipa_init_q6_smem() - Initialize Q6 general memory and
+ * header memory regions in IPA.
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate dma memory
+ * -EFAULT: failed to send IPA command to initialize the memory
+ */
+int ipa_init_q6_smem(void)
+{
+ int rc;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0)
+ rc = ipa_init_smem_region(IPA_MEM_PART(modem_size) -
+ IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE,
+ IPA_MEM_PART(modem_ofst));
+ else
+ rc = ipa_init_smem_region(IPA_MEM_PART(modem_size),
+ IPA_MEM_PART(modem_ofst));
+
+ if (rc) {
+ IPAERR("failed to initialize Modem RAM memory\n");
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return rc;
+ }
+
+ rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_size),
+ IPA_MEM_PART(modem_hdr_ofst));
+ if (rc) {
+ IPAERR("failed to initialize Modem HDRs RAM memory\n");
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return rc;
+ }
+
+ rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+ if (rc) {
+ IPAERR("failed to initialize Modem proc ctx RAM memory\n");
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return rc;
+ }
+
+ rc = ipa_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
+ IPA_MEM_PART(modem_comp_decomp_ofst));
+ if (rc) {
+ IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return rc;
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return rc;
+}
+
+static void ipa_free_buffer(void *user1, int user2)
+{
+ kfree(user1);
+}
+
+int ipa_q6_pipe_delay(bool zip_pipes)
+{
+ u32 reg_val = 0;
+ int client_idx;
+ int ep_idx;
+
+ /* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ /* Skip the processing for non Q6 pipes. */
+ if (!IPA_CLIENT_IS_Q6_PROD(client_idx))
+ continue;
+ /* Skip the processing for NON-ZIP pipes. */
+ else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx))
+ continue;
+ /* Skip the processing for ZIP pipes. */
+ else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx))
+ continue;
+
+ ep_idx = ipa2_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
+ IPA_SETFIELD_IN_REG(reg_val, 1,
+ IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
+ IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_CTRL_N_OFST(ep_idx), reg_val);
+ }
+
+ return 0;
+}
+
+int ipa_q6_monitor_holb_mitigation(bool enable)
+{
+ int ep_idx;
+ int client_idx;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx)) {
+ ep_idx = ipa2_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+ /* Send a command to Uc to enable/disable
+ * holb monitoring.
+ */
+ ipa_uc_monitor_holb(client_idx, enable);
+ }
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+static int ipa_q6_avoid_holb(bool zip_pipes)
+{
+ u32 reg_val;
+ int ep_idx;
+ int client_idx;
+ struct ipa_ep_cfg_ctrl avoid_holb;
+
+ memset(&avoid_holb, 0, sizeof(avoid_holb));
+ avoid_holb.ipa_ep_suspend = true;
+
+ /* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ /* Skip the processing for non Q6 pipes. */
+ if (!IPA_CLIENT_IS_Q6_CONS(client_idx))
+ continue;
+ /* Skip the processing for NON-ZIP pipes. */
+ else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx))
+ continue;
+ /* Skip the processing for ZIP pipes. */
+ else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx))
+ continue;
+
+ ep_idx = ipa2_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
+		/*
+		 * ipa2_cfg_ep_holb is not used here because we are
+		 * setting HOLB on Q6 pipes, which are not valid from
+		 * the APPS perspective; that function would therefore
+		 * fail.
+		 */
+ reg_val = 0;
+ IPA_SETFIELD_IN_REG(reg_val, 0,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(ep_idx),
+ reg_val);
+
+ reg_val = 0;
+ IPA_SETFIELD_IN_REG(reg_val, 1,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(ep_idx),
+ reg_val);
+
+ ipa2_cfg_ep_ctrl(ep_idx, &avoid_holb);
+ }
+
+ return 0;
+}
+
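+/*
+ * Upper bound on the number of immediate commands needed to clean the
+ * Q6 filter/routing tables. Illustrative arithmetic with hypothetical
+ * values: 20 pipes and 7-entry modem routing index ranges for v4 and
+ * v6 would give 20 * 2 + 7 + 7 = 54 commands.
+ */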
+static u32 ipa_get_max_flt_rt_cmds(u32 num_pipes)
+{
+ u32 max_cmds = 0;
+
+ /* As many filter tables as there are pipes, x2 for IPv4 and IPv6 */
+ max_cmds += num_pipes * 2;
+
+ /* For each of the Modem routing tables */
+ max_cmds += (IPA_MEM_PART(v4_modem_rt_index_hi) -
+ IPA_MEM_PART(v4_modem_rt_index_lo) + 1);
+
+ max_cmds += (IPA_MEM_PART(v6_modem_rt_index_hi) -
+ IPA_MEM_PART(v6_modem_rt_index_lo) + 1);
+
+ return max_cmds;
+}
+
+static int ipa_q6_clean_q6_tables(void)
+{
+ struct ipa_desc *desc;
+ struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
+ int pipe_idx;
+ int num_cmds = 0;
+ int index;
+ int retval;
+ struct ipa_mem_buffer mem = { 0 };
+ u32 *entry;
+ u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes);
+
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("failed to alloc DMA buff of size 4\n");
+ return -ENOMEM;
+ }
+
+ mem.size = 4;
+ entry = mem.base;
+ *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+
+ desc = kcalloc(max_cmds, sizeof(struct ipa_desc), GFP_KERNEL);
+ if (!desc) {
+ IPAERR("failed to allocate memory\n");
+ retval = -ENOMEM;
+ goto bail_dma;
+ }
+
+ cmd = kcalloc(max_cmds, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
+ GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to allocate memory\n");
+ retval = -ENOMEM;
+ goto bail_desc;
+ }
+
+ /*
+ * Iterate over all pipes that are either invalid (not in use by
+ * the AP) or connected but not configured by the AP.
+ */
+ for (pipe_idx = 0; pipe_idx < ipa_ctx->ipa_num_pipes; pipe_idx++) {
+ if (!ipa_ctx->ep[pipe_idx].valid ||
+ ipa_ctx->ep[pipe_idx].skip_ep_cfg) {
+ /*
+ * Need to point v4 and v6 fltr tables to an empty
+ * table
+ */
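+ /*
+ * The "+ 8" presumably skips the two leading table words (the
+ * entry bitmap and the global rule slot) so that the write
+ * lands on this pipe's entry; see _ipa_init_flt4_v2.
+ */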
+ cmd[num_cmds].size = mem.size;
+ cmd[num_cmds].system_addr = mem.phys_base;
+ cmd[num_cmds].local_addr =
+ ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_flt_ofst) + 8 + pipe_idx * 4;
+
+ desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
+ desc[num_cmds].pyld = &cmd[num_cmds];
+ desc[num_cmds].len = sizeof(*cmd);
+ desc[num_cmds].type = IPA_IMM_CMD_DESC;
+ num_cmds++;
+
+ cmd[num_cmds].size = mem.size;
+ cmd[num_cmds].system_addr = mem.phys_base;
+ cmd[num_cmds].local_addr =
+ ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_flt_ofst) + 8 + pipe_idx * 4;
+
+ desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
+ desc[num_cmds].pyld = &cmd[num_cmds];
+ desc[num_cmds].len = sizeof(*cmd);
+ desc[num_cmds].type = IPA_IMM_CMD_DESC;
+ num_cmds++;
+ }
+ }
+
+ /* Need to point v4/v6 modem routing tables to an empty table */
+ for (index = IPA_MEM_PART(v4_modem_rt_index_lo);
+ index <= IPA_MEM_PART(v4_modem_rt_index_hi);
+ index++) {
+ cmd[num_cmds].size = mem.size;
+ cmd[num_cmds].system_addr = mem.phys_base;
+ cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_rt_ofst) + index * 4;
+
+ desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
+ desc[num_cmds].pyld = &cmd[num_cmds];
+ desc[num_cmds].len = sizeof(*cmd);
+ desc[num_cmds].type = IPA_IMM_CMD_DESC;
+ num_cmds++;
+ }
+
+ for (index = IPA_MEM_PART(v6_modem_rt_index_lo);
+ index <= IPA_MEM_PART(v6_modem_rt_index_hi);
+ index++) {
+ cmd[num_cmds].size = mem.size;
+ cmd[num_cmds].system_addr = mem.phys_base;
+ cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_rt_ofst) + index * 4;
+
+ desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
+ desc[num_cmds].pyld = &cmd[num_cmds];
+ desc[num_cmds].len = sizeof(*cmd);
+ desc[num_cmds].type = IPA_IMM_CMD_DESC;
+ num_cmds++;
+ }
+
+ retval = ipa_send_cmd(num_cmds, desc);
+ if (retval) {
+ IPAERR("failed to send immediate command (error %d)\n", retval);
+ retval = -EFAULT;
+ }
+
+ kfree(cmd);
+
+bail_desc:
+ kfree(desc);
+
+bail_dma:
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+ return retval;
+}
+
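+/*
+ * Build a register-write immediate command for IPA_ENDP_INIT_AGGR_n:
+ * force-close any open aggregation frame on the endpoint and clear
+ * AGGR_EN so that no new frames are opened.
+ */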
+static void ipa_q6_disable_agg_reg(struct ipa_register_write *reg_write,
+ int ep_idx)
+{
+ reg_write->skip_pipeline_clear = 0;
+
+ reg_write->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(ep_idx);
+ reg_write->value =
+ (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+ reg_write->value_mask =
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+
+ reg_write->value |=
+ ((0 & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) <<
+ IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT);
+ reg_write->value_mask |=
+ ((IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK <<
+ IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT));
+}
+
+static int ipa_q6_set_ex_path_dis_agg(void)
+{
+ int ep_idx;
+ int client_idx;
+ struct ipa_desc *desc;
+ int num_descs = 0;
+ int index;
+ struct ipa_register_write *reg_write;
+ int retval;
+
+ desc = kcalloc(ipa_ctx->ipa_num_pipes, sizeof(struct ipa_desc),
+ GFP_KERNEL);
+ if (!desc) {
+ IPAERR("failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Set the exception path to AP */
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ ep_idx = ipa2_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
+ if (ipa_ctx->ep[ep_idx].valid &&
+ ipa_ctx->ep[ep_idx].skip_ep_cfg) {
+ BUG_ON(num_descs >= ipa_ctx->ipa_num_pipes);
+ reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
+
+ if (!reg_write) {
+ IPAERR("failed to allocate memory\n");
+ BUG();
+ }
+ reg_write->skip_pipeline_clear = 0;
+ reg_write->offset = IPA_ENDP_STATUS_n_OFST(ep_idx);
+ reg_write->value =
+ (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) &
+ IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+ reg_write->value_mask =
+ IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+
+ desc[num_descs].opcode = IPA_REGISTER_WRITE;
+ desc[num_descs].pyld = reg_write;
+ desc[num_descs].len = sizeof(*reg_write);
+ desc[num_descs].type = IPA_IMM_CMD_DESC;
+ desc[num_descs].callback = ipa_free_buffer;
+ desc[num_descs].user1 = reg_write;
+ num_descs++;
+ }
+ }
+
+ /* Disable AGGR on IPA->Q6 pipes */
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ ep_idx = ipa2_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+ if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
+ IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx)) {
+ reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
+
+ if (!reg_write) {
+ IPAERR("failed to allocate memory\n");
+ BUG();
+ }
+
+ ipa_q6_disable_agg_reg(reg_write, ep_idx);
+
+ desc[num_descs].opcode = IPA_REGISTER_WRITE;
+ desc[num_descs].pyld = reg_write;
+ desc[num_descs].len = sizeof(*reg_write);
+ desc[num_descs].type = IPA_IMM_CMD_DESC;
+ desc[num_descs].callback = ipa_free_buffer;
+ desc[num_descs].user1 = reg_write;
+ num_descs++;
+ }
+ }
+
+ /* Wait up to 150 msec for IPA TAG process completion */
+ retval = ipa_tag_process(desc, num_descs,
+ msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
+ if (retval) {
+ IPAERR("TAG process failed! (error %d)\n", retval);
+ /* For timeout error ipa_free_buffer cb will free user1 */
+ if (retval != -ETIME) {
+ for (index = 0; index < num_descs; index++)
+ kfree(desc[index].user1);
+ retval = -EINVAL;
+ }
+ }
+
+ kfree(desc);
+
+ return retval;
+}
+
+/**
+* ipa_q6_pre_shutdown_cleanup() - Clean up all Q6-related configuration
+* in IPA HW before modem shutdown. This is performed in
+* case of SSR.
+*
+* Return codes:
+* 0: success
+* This is a mandatory procedure; if one of the steps fails, the
+* AP needs to restart.
+*/
+int ipa_q6_pre_shutdown_cleanup(void)
+{
+ /* If the uC has notified the APPS of a ZIP engine error,
+ * the APPS needs to assert (this is a non-recoverable error).
+ */
+ if (ipa_ctx->uc_ctx.uc_zip_error)
+ BUG();
+
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("Q6");
+
+ /*
+ * Do not delay Q6 pipes here. Doing so may result in IPA reading a
+ * DMA_TASK with the lock bit set and only then having Q6 pipe delay
+ * applied. In that situation IPA will remain locked, as the DMA_TASK
+ * with the unlock bit will not be read by IPA while pipe delay is
+ * enabled. The IPA uC will wait for the pipe to be empty before
+ * issuing a BAM pipe reset.
+ */
+
+ if (ipa_q6_monitor_holb_mitigation(false)) {
+ IPAERR("Failed to disable HOLB monitroing on Q6 pipes\n");
+ BUG();
+ }
+
+ if (ipa_q6_avoid_holb(false)) {
+ IPAERR("Failed to set HOLB on Q6 pipes\n");
+ BUG();
+ }
+ if (ipa_q6_clean_q6_tables()) {
+ IPAERR("Failed to clean Q6 tables\n");
+ BUG();
+ }
+ if (ipa_q6_set_ex_path_dis_agg()) {
+ IPAERR("Failed to disable aggregation on Q6 pipes\n");
+ BUG();
+ }
+
+ ipa_ctx->q6_proxy_clk_vote_valid = true;
+ return 0;
+}
+
+/**
+* ipa_q6_post_shutdown_cleanup() - Clean up the Q6 pipes
+* in IPA HW after modem shutdown. This is performed
+* in case of SSR.
+*
+* Return codes:
+* 0: success
+* This is a mandatory procedure; if one of the steps fails, the
+* AP needs to restart.
+*/
+int ipa_q6_post_shutdown_cleanup(void)
+{
+ int client_idx;
+ int res;
+
+ /*
+ * Do not delay Q6 pipes here. Doing so may result in IPA reading a
+ * DMA_TASK with the lock bit set and only then having Q6 pipe delay
+ * applied. In that situation IPA will remain locked, as the DMA_TASK
+ * with the unlock bit will not be read by IPA while pipe delay is
+ * enabled. The IPA uC will wait for the pipe to be empty before
+ * issuing a BAM pipe reset.
+ */
+
+ if (ipa_q6_avoid_holb(true)) {
+ IPAERR("Failed to set HOLB on Q6 ZIP pipes\n");
+ BUG();
+ }
+
+ if (!ipa_ctx->uc_ctx.uc_loaded) {
+ IPAERR("uC is not loaded, won't reset Q6 pipes\n");
+ return 0;
+ }
+
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+ if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
+ IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) ||
+ IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) ||
+ IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) {
+ res = ipa_uc_reset_pipe(client_idx);
+ if (res)
+ BUG();
+ }
+ return 0;
+}
+
+int _ipa_init_sram_v2(void)
+{
+ u32 *ipa_sram_mmio;
+ unsigned long phys_addr;
+ struct ipa_hw_imm_cmd_dma_shared_mem cmd = {0};
+ struct ipa_desc desc = {0};
+ struct ipa_mem_buffer mem;
+ int rc = 0;
+
+ phys_addr = ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(
+ ipa_ctx->smem_restricted_bytes / 4);
+
+ ipa_sram_mmio = ioremap(phys_addr,
+ ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
+ if (!ipa_sram_mmio) {
+ IPAERR("fail to ioremap IPA SRAM\n");
+ return -ENOMEM;
+ }
+
+#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val)
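+/*
+ * IPA_SRAM_SET(ofst, val) writes the 32-bit SRAM word that starts 4
+ * bytes before 'ofst'; e.g. IPA_SRAM_SET(0x50, v) stores v at byte
+ * offset 0x4C of the mapped window. Callers pass both 'ofst - 4' and
+ * 'ofst' to place two consecutive canaries ahead of a partition.
+ */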
+
+ IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(uc_info_ofst), IPA_MEM_CANARY_VAL);
+
+ iounmap(ipa_sram_mmio);
+
+ mem.size = IPA_STATUS_CLEAR_SIZE;
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+ memset(mem.base, 0, mem.size);
+
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = IPA_STATUS_CLEAR_OFST;
+ desc.opcode = IPA_DMA_SHARED_MEM;
+ desc.pyld = &cmd;
+ desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ desc.type = IPA_IMM_CMD_DESC;
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return rc;
+}
+
+int _ipa_init_sram_v2_5(void)
+{
+ u32 *ipa_sram_mmio;
+ unsigned long phys_addr;
+
+ phys_addr = ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_SW_FIRST_v2_5;
+
+ ipa_sram_mmio = ioremap(phys_addr,
+ ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
+ if (!ipa_sram_mmio) {
+ IPAERR("fail to ioremap IPA SRAM\n");
+ return -ENOMEM;
+ }
+
+#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val)
+
+ IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst) - 4, IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4,
+ IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL);
+ IPA_SRAM_SET(IPA_MEM_PART(end_ofst), IPA_MEM_CANARY_VAL);
+
+ iounmap(ipa_sram_mmio);
+
+ return 0;
+}
+
+static inline void ipa_sram_set_canary(u32 *sram_mmio, int offset)
+{
+ /* Set 4 bytes of CANARY before the offset */
+ sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
+}
+
+int _ipa_init_sram_v2_6L(void)
+{
+ u32 *ipa_sram_mmio;
+ unsigned long phys_addr;
+
+ phys_addr = ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_SW_FIRST_v2_5;
+
+ ipa_sram_mmio = ioremap(phys_addr,
+ ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
+ if (!ipa_sram_mmio) {
+ IPAERR("fail to ioremap IPA SRAM\n");
+ return -ENOMEM;
+ }
+
+ /* Consult ipa_ram_mmap.h for the location of the CANARY values */
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst) - 4);
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst));
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_ofst) - 4);
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_ofst));
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst) - 4);
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst));
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_ofst));
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
+ ipa_sram_set_canary(ipa_sram_mmio,
+ IPA_MEM_PART(modem_comp_decomp_ofst) - 4);
+ ipa_sram_set_canary(ipa_sram_mmio,
+ IPA_MEM_PART(modem_comp_decomp_ofst));
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
+ ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst));
+
+ iounmap(ipa_sram_mmio);
+
+ return 0;
+}
+
+int _ipa_init_hdr_v2(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipa_hdr_init_local cmd;
+ int rc = 0;
+
+ mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+ memset(mem.base, 0, mem.size);
+
+ cmd.hdr_table_src_addr = mem.phys_base;
+ cmd.size_hdr_table = mem.size;
+ cmd.hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(modem_hdr_ofst);
+
+ desc.opcode = IPA_HDR_INIT_LOCAL;
+ desc.pyld = &cmd;
+ desc.len = sizeof(struct ipa_hdr_init_local);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return rc;
+}
+
+int _ipa_init_hdr_v2_5(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipa_hdr_init_local cmd = { 0 };
+ struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd = { 0 };
+
+ mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+ memset(mem.base, 0, mem.size);
+
+ cmd.hdr_table_src_addr = mem.phys_base;
+ cmd.size_hdr_table = mem.size;
+ cmd.hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(modem_hdr_ofst);
+
+ desc.opcode = IPA_HDR_INIT_LOCAL;
+ desc.pyld = &cmd;
+ desc.len = sizeof(struct ipa_hdr_init_local);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ dma_free_coherent(ipa_ctx->pdev,
+ mem.size, mem.base,
+ mem.phys_base);
+ return -EFAULT;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+ mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
+ IPA_MEM_PART(apps_hdr_proc_ctx_size);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+ memset(mem.base, 0, mem.size);
+ memset(&desc, 0, sizeof(desc));
+
+ dma_cmd.system_addr = mem.phys_base;
+ dma_cmd.local_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
+ dma_cmd.size = mem.size;
+ desc.opcode = IPA_DMA_SHARED_MEM;
+ desc.pyld = &dma_cmd;
+ desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ dma_free_coherent(ipa_ctx->pdev,
+ mem.size,
+ mem.base,
+ mem.phys_base);
+ return -EFAULT;
+ }
+
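+ /*
+ * Point the HW at the SRAM-local base of the header processing
+ * context table that was just initialized above.
+ */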
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST,
+ dma_cmd.local_addr);
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+ return 0;
+}
+
+int _ipa_init_hdr_v2_6L(void)
+{
+ /* Same implementation as IPAv2 */
+ return _ipa_init_hdr_v2();
+}
+
+int _ipa_init_rt4_v2(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipa_ip_v4_routing_init v4_cmd;
+ u32 *entry;
+ int i;
+ int rc = 0;
+
+ for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
+ i <= IPA_MEM_PART(v4_modem_rt_index_hi);
+ i++)
+ ipa_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
+ IPADBG("v4 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v4]);
+
+ mem.size = IPA_MEM_PART(v4_rt_size);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+
+ entry = mem.base;
+ for (i = 0; i < IPA_MEM_PART(v4_num_index); i++) {
+ *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+ entry++;
+ }
+
+ desc.opcode = IPA_IP_V4_ROUTING_INIT;
+ v4_cmd.ipv4_rules_addr = mem.phys_base;
+ v4_cmd.size_ipv4_rules = mem.size;
+ v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_rt_ofst);
+ IPADBG("putting Routing IPv4 rules to phys 0x%x",
+ v4_cmd.ipv4_addr);
+
+ desc.pyld = &v4_cmd;
+ desc.len = sizeof(struct ipa_ip_v4_routing_init);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return rc;
+}
+
+int _ipa_init_rt6_v2(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipa_ip_v6_routing_init v6_cmd;
+ u32 *entry;
+ int i;
+ int rc = 0;
+
+ for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
+ i <= IPA_MEM_PART(v6_modem_rt_index_hi);
+ i++)
+ ipa_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
+ IPADBG("v6 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v6]);
+
+ mem.size = IPA_MEM_PART(v6_rt_size);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+
+ entry = mem.base;
+ for (i = 0; i < IPA_MEM_PART(v6_num_index); i++) {
+ *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+ entry++;
+ }
+
+ desc.opcode = IPA_IP_V6_ROUTING_INIT;
+ v6_cmd.ipv6_rules_addr = mem.phys_base;
+ v6_cmd.size_ipv6_rules = mem.size;
+ v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_rt_ofst);
+ IPADBG("putting Routing IPv6 rules to phys 0x%x",
+ v6_cmd.ipv6_addr);
+
+ desc.pyld = &v6_cmd;
+ desc.len = sizeof(struct ipa_ip_v6_routing_init);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return rc;
+}
+
+int _ipa_init_flt4_v2(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipa_ip_v4_filter_init v4_cmd;
+ u32 *entry;
+ int i;
+ int rc = 0;
+
+ mem.size = IPA_MEM_PART(v4_flt_size);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+
+ entry = mem.base;
+
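+ /*
+ * First word of the filter table: bit 0 appears to mark the
+ * global rule slot and bits 1..20 the per-pipe slots, so
+ * (0xFFFFF << 1) | 0x1 marks all of them as present.
+ */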
+ *entry = ((0xFFFFF << 1) | 0x1);
+ entry++;
+
+ for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) {
+ *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+ entry++;
+ }
+
+ desc.opcode = IPA_IP_V4_FILTER_INIT;
+ v4_cmd.ipv4_rules_addr = mem.phys_base;
+ v4_cmd.size_ipv4_rules = mem.size;
+ v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_flt_ofst);
+ IPADBG("putting Filtering IPv4 rules to phys 0x%x",
+ v4_cmd.ipv4_addr);
+
+ desc.pyld = &v4_cmd;
+ desc.len = sizeof(struct ipa_ip_v4_filter_init);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return rc;
+}
+
+int _ipa_init_flt6_v2(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipa_ip_v6_filter_init v6_cmd;
+ u32 *entry;
+ int i;
+ int rc = 0;
+
+ mem.size = IPA_MEM_PART(v6_flt_size);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+
+ entry = mem.base;
+
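+ /* Same table-header bitmap layout as in the IPv4 case above. */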
+ *entry = (0xFFFFF << 1) | 0x1;
+ entry++;
+
+ for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) {
+ *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+ entry++;
+ }
+
+ desc.opcode = IPA_IP_V6_FILTER_INIT;
+ v6_cmd.ipv6_rules_addr = mem.phys_base;
+ v6_cmd.size_ipv6_rules = mem.size;
+ v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_flt_ofst);
+ IPADBG("putting Filtering IPv6 rules to phys 0x%x",
+ v6_cmd.ipv6_addr);
+
+ desc.pyld = &v6_cmd;
+ desc.len = sizeof(struct ipa_ip_v6_filter_init);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return rc;
+}
+
+static int ipa_setup_apps_pipes(void)
+{
+ struct ipa_sys_connect_params sys_in;
+ int result = 0;
+
+ /* CMD OUT (A5->IPA) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
+ sys_in.skip_ep_cfg = true;
+ if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_cmd;
+ }
+ IPADBG("Apps to IPA cmd pipe is connected\n");
+
+ ipa_ctx->ctrl->ipa_init_sram();
+ IPADBG("SRAM initialized\n");
+
+ ipa_ctx->ctrl->ipa_init_hdr();
+ IPADBG("HDR initialized\n");
+
+ ipa_ctx->ctrl->ipa_init_rt4();
+ IPADBG("V4 RT initialized\n");
+
+ ipa_ctx->ctrl->ipa_init_rt6();
+ IPADBG("V6 RT initialized\n");
+
+ ipa_ctx->ctrl->ipa_init_flt4();
+ IPADBG("V4 FLT initialized\n");
+
+ ipa_ctx->ctrl->ipa_init_flt6();
+ IPADBG("V6 FLT initialized\n");
+
+ if (ipa_setup_exception_path()) {
+ IPAERR(":fail to setup excp path\n");
+ result = -EPERM;
+ goto fail_schedule_delayed_work;
+ }
+ IPADBG("Exception path was successfully set");
+
+ if (ipa_setup_dflt_rt_tables()) {
+ IPAERR(":fail to setup dflt routes\n");
+ result = -EPERM;
+ goto fail_schedule_delayed_work;
+ }
+ IPADBG("default routing was set\n");
+
+ /* LAN IN (IPA->A5) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
+ sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1;
+ sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_A5_MUX_HEADER_LENGTH;
+ } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
+ sys_in.notify = ipa_lan_rx_cb;
+ sys_in.priv = NULL;
+ sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+ sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
+ } else {
+ WARN_ON(1);
+ }
+
+ /*
+ * ipa_lan_rx_cb() is intended to notify the source EP about a packet
+ * being received on the LAN_CONS by calling the source EP call-back.
+ * There could be a race condition in calling this call-back: another
+ * thread may nullify it, e.g. on EP disconnect.
+ * This lock is intended to protect access to the source EP call-back.
+ */
+ spin_lock_init(&ipa_ctx->disconnect_lock);
+ if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_schedule_delayed_work;
+ }
+
+ /* LAN-WAN OUT (A5->IPA) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_data_out;
+ }
+
+ return 0;
+
+fail_data_out:
+ ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+fail_schedule_delayed_work:
+ if (ipa_ctx->dflt_v6_rt_rule_hdl)
+ __ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl);
+ if (ipa_ctx->dflt_v4_rt_rule_hdl)
+ __ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl);
+ if (ipa_ctx->excp_hdr_hdl)
+ __ipa_del_hdr(ipa_ctx->excp_hdr_hdl);
+ ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+fail_cmd:
+ return result;
+}
+
+static void ipa_teardown_apps_pipes(void)
+{
+ ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+ ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+ __ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl);
+ __ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl);
+ __ipa_del_hdr(ipa_ctx->excp_hdr_hdl);
+ ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+}
+
+#ifdef CONFIG_COMPAT
+long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ struct ipa_ioc_nat_alloc_mem32 nat_mem32;
+ struct ipa_ioc_nat_alloc_mem nat_mem;
+
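+ /*
+ * Most commands translate 1:1 to their 64-bit variants; only
+ * IPA_IOC_ALLOC_NAT_MEM32 needs its payload repacked, since
+ * size_t and off_t differ in width between a 32-bit userspace
+ * and a 64-bit kernel.
+ */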
+ switch (cmd) {
+ case IPA_IOC_ADD_HDR32:
+ cmd = IPA_IOC_ADD_HDR;
+ break;
+ case IPA_IOC_DEL_HDR32:
+ cmd = IPA_IOC_DEL_HDR;
+ break;
+ case IPA_IOC_ADD_RT_RULE32:
+ cmd = IPA_IOC_ADD_RT_RULE;
+ break;
+ case IPA_IOC_DEL_RT_RULE32:
+ cmd = IPA_IOC_DEL_RT_RULE;
+ break;
+ case IPA_IOC_ADD_FLT_RULE32:
+ cmd = IPA_IOC_ADD_FLT_RULE;
+ break;
+ case IPA_IOC_DEL_FLT_RULE32:
+ cmd = IPA_IOC_DEL_FLT_RULE;
+ break;
+ case IPA_IOC_GET_RT_TBL32:
+ cmd = IPA_IOC_GET_RT_TBL;
+ break;
+ case IPA_IOC_COPY_HDR32:
+ cmd = IPA_IOC_COPY_HDR;
+ break;
+ case IPA_IOC_QUERY_INTF32:
+ cmd = IPA_IOC_QUERY_INTF;
+ break;
+ case IPA_IOC_QUERY_INTF_TX_PROPS32:
+ cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
+ break;
+ case IPA_IOC_QUERY_INTF_RX_PROPS32:
+ cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
+ break;
+ case IPA_IOC_QUERY_INTF_EXT_PROPS32:
+ cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
+ break;
+ case IPA_IOC_GET_HDR32:
+ cmd = IPA_IOC_GET_HDR;
+ break;
+ case IPA_IOC_ALLOC_NAT_MEM32:
+ if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
+ sizeof(struct ipa_ioc_nat_alloc_mem32))) {
+ retval = -EFAULT;
+ goto ret;
+ }
+ memcpy(nat_mem.dev_name, nat_mem32.dev_name,
+ IPA_RESOURCE_NAME_MAX);
+ nat_mem.size = (size_t)nat_mem32.size;
+ nat_mem.offset = (off_t)nat_mem32.offset;
+
+ /* null terminate the string */
+ nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+ if (ipa2_allocate_nat_device(&nat_mem)) {
+ retval = -EFAULT;
+ goto ret;
+ }
+ nat_mem32.offset = (compat_off_t)nat_mem.offset;
+ if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
+ sizeof(struct ipa_ioc_nat_alloc_mem32))) {
+ retval = -EFAULT;
+ }
+ret:
+ return retval;
+ case IPA_IOC_V4_INIT_NAT32:
+ cmd = IPA_IOC_V4_INIT_NAT;
+ break;
+ case IPA_IOC_NAT_DMA32:
+ cmd = IPA_IOC_NAT_DMA;
+ break;
+ case IPA_IOC_V4_DEL_NAT32:
+ cmd = IPA_IOC_V4_DEL_NAT;
+ break;
+ case IPA_IOC_GET_NAT_OFFSET32:
+ cmd = IPA_IOC_GET_NAT_OFFSET;
+ break;
+ case IPA_IOC_PULL_MSG32:
+ cmd = IPA_IOC_PULL_MSG;
+ break;
+ case IPA_IOC_RM_ADD_DEPENDENCY32:
+ cmd = IPA_IOC_RM_ADD_DEPENDENCY;
+ break;
+ case IPA_IOC_RM_DEL_DEPENDENCY32:
+ cmd = IPA_IOC_RM_DEL_DEPENDENCY;
+ break;
+ case IPA_IOC_GENERATE_FLT_EQ32:
+ cmd = IPA_IOC_GENERATE_FLT_EQ;
+ break;
+ case IPA_IOC_QUERY_RT_TBL_INDEX32:
+ cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
+ break;
+ case IPA_IOC_WRITE_QMAPID32:
+ cmd = IPA_IOC_WRITE_QMAPID;
+ break;
+ case IPA_IOC_MDFY_FLT_RULE32:
+ cmd = IPA_IOC_MDFY_FLT_RULE;
+ break;
+ case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
+ cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
+ break;
+ case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
+ cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
+ break;
+ case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
+ cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
+ break;
+ case IPA_IOC_MDFY_RT_RULE32:
+ cmd = IPA_IOC_MDFY_RT_RULE;
+ break;
+ case IPA_IOC_COMMIT_HDR:
+ case IPA_IOC_RESET_HDR:
+ case IPA_IOC_COMMIT_RT:
+ case IPA_IOC_RESET_RT:
+ case IPA_IOC_COMMIT_FLT:
+ case IPA_IOC_RESET_FLT:
+ case IPA_IOC_DUMP:
+ case IPA_IOC_PUT_RT_TBL:
+ case IPA_IOC_PUT_HDR:
+ case IPA_IOC_SET_FLT:
+ case IPA_IOC_QUERY_EP_MAPPING:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return ipa_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations ipa_drv_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa_open,
+ .read = ipa_read,
+ .unlocked_ioctl = ipa_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_ipa_ioctl,
+#endif
+};
+
+static int ipa_get_clks(struct device *dev)
+{
+ ipa_clk = clk_get(dev, "core_clk");
+ if (IS_ERR(ipa_clk)) {
+ if (ipa_clk != ERR_PTR(-EPROBE_DEFER))
+ IPAERR("fail to get ipa clk\n");
+ return PTR_ERR(ipa_clk);
+ }
+
+ if (smmu_info.present && smmu_info.arm_smmu) {
+ smmu_clk = clk_get(dev, "smmu_clk");
+ if (IS_ERR(smmu_clk)) {
+ if (smmu_clk != ERR_PTR(-EPROBE_DEFER))
+ IPAERR("fail to get smmu clk\n");
+ return PTR_ERR(smmu_clk);
+ }
+
+ if (clk_get_rate(smmu_clk) == 0) {
+ long rate = clk_round_rate(smmu_clk, 1000);
+
+ clk_set_rate(smmu_clk, rate);
+ }
+ }
+
+ if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
+ ipa_cnoc_clk = clk_get(dev, "iface_clk");
+ if (IS_ERR(ipa_cnoc_clk)) {
+ ipa_cnoc_clk = NULL;
+ IPAERR("fail to get cnoc clk\n");
+ return -ENODEV;
+ }
+
+ ipa_clk_src = clk_get(dev, "core_src_clk");
+ if (IS_ERR(ipa_clk_src)) {
+ ipa_clk_src = NULL;
+ IPAERR("fail to get ipa clk src\n");
+ return -ENODEV;
+ }
+
+ sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk");
+ if (IS_ERR(sys_noc_ipa_axi_clk)) {
+ sys_noc_ipa_axi_clk = NULL;
+ IPAERR("fail to get sys_noc_ipa_axi clk\n");
+ return -ENODEV;
+ }
+
+ ipa_inactivity_clk = clk_get(dev, "inactivity_clk");
+ if (IS_ERR(ipa_inactivity_clk)) {
+ ipa_inactivity_clk = NULL;
+ IPAERR("fail to get inactivity clk\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+void _ipa_enable_clks_v2_0(void)
+{
+ IPADBG("enabling gcc_ipa_clk\n");
+ if (ipa_clk) {
+ clk_prepare(ipa_clk);
+ clk_enable(ipa_clk);
+ IPADBG("curr_ipa_clk_rate=%d", ipa_ctx->curr_ipa_clk_rate);
+ clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate);
+ ipa_uc_notify_clk_state(true);
+ } else {
+ WARN_ON(1);
+ }
+
+ if (smmu_clk)
+ clk_prepare_enable(smmu_clk);
+ /* Enable the BAM IRQ. */
+ ipa_sps_irq_control_all(true);
+ ipa_suspend_apps_pipes(false);
+}
+
+void _ipa_enable_clks_v1_1(void)
+{
+ if (ipa_cnoc_clk) {
+ clk_prepare(ipa_cnoc_clk);
+ clk_enable(ipa_cnoc_clk);
+ clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE);
+ } else {
+ WARN_ON(1);
+ }
+
+ if (ipa_clk_src)
+ clk_set_rate(ipa_clk_src,
+ ipa_ctx->curr_ipa_clk_rate);
+ else
+ WARN_ON(1);
+
+ if (ipa_clk)
+ clk_prepare(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (sys_noc_ipa_axi_clk)
+ clk_prepare(sys_noc_ipa_axi_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_inactivity_clk)
+ clk_prepare(ipa_inactivity_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_clk)
+ clk_enable(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (sys_noc_ipa_axi_clk)
+ clk_enable(sys_noc_ipa_axi_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_inactivity_clk)
+ clk_enable(ipa_inactivity_clk);
+ else
+ WARN_ON(1);
+}
+
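+/*
+ * Translate the current IPA clock rate into an index into the bus
+ * scaling usecase table. Index 0 is presumably the "no vote" case
+ * (see ipa_disable_clks), so SVS maps to 1 and turbo to the last
+ * usecase.
+ */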
+static unsigned int ipa_get_bus_vote(void)
+{
+ unsigned int idx = 1;
+
+ if (ipa_ctx->curr_ipa_clk_rate == ipa_ctx->ctrl->ipa_clk_rate_svs) {
+ idx = 1;
+ } else if (ipa_ctx->curr_ipa_clk_rate ==
+ ipa_ctx->ctrl->ipa_clk_rate_nominal) {
+ if (ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2)
+ idx = 1;
+ else
+ idx = 2;
+ } else if (ipa_ctx->curr_ipa_clk_rate ==
+ ipa_ctx->ctrl->ipa_clk_rate_turbo) {
+ idx = ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
+ } else {
+ WARN_ON(1);
+ }
+
+ IPADBG("curr %d idx %d\n", ipa_ctx->curr_ipa_clk_rate, idx);
+
+ return idx;
+}
+
+/**
+* ipa_enable_clks() - Turn on IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_enable_clks(void)
+{
+ IPADBG("enabling IPA clocks and bus voting\n");
+
+ ipa_ctx->ctrl->ipa_enable_clks();
+
+ if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
+ if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl,
+ ipa_get_bus_vote()))
+ WARN_ON(1);
+}
+
+void _ipa_disable_clks_v1_1(void)
+{
+ if (ipa_inactivity_clk)
+ clk_disable_unprepare(ipa_inactivity_clk);
+ else
+ WARN_ON(1);
+
+ if (sys_noc_ipa_axi_clk)
+ clk_disable_unprepare(sys_noc_ipa_axi_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_clk)
+ clk_disable_unprepare(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_cnoc_clk)
+ clk_disable_unprepare(ipa_cnoc_clk);
+ else
+ WARN_ON(1);
+}
+
+void _ipa_disable_clks_v2_0(void)
+{
+ IPADBG("disabling gcc_ipa_clk\n");
+ ipa_suspend_apps_pipes(true);
+ ipa_sps_irq_control_all(false);
+ ipa_uc_notify_clk_state(false);
+ if (ipa_clk)
+ clk_disable_unprepare(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (smmu_clk)
+ clk_disable_unprepare(smmu_clk);
+}
+
+/**
+* ipa_disable_clks() - Turn off IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_disable_clks(void)
+{
+ IPADBG("disabling IPA clocks and bus voting\n");
+
+ ipa_ctx->ctrl->ipa_disable_clks();
+
+ if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
+ if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl,
+ 0))
+ WARN_ON(1);
+}
+
+/**
+ * ipa_start_tag_process() - Send TAG packet and wait for it to come back
+ *
+ * This function is called prior to clock gating when active client counter
+ * is 1. TAG process ensures that there are no packets inside IPA HW that
+ * were not submitted to the peer's BAM. During the TAG process all
+ * aggregation frames are (force) closed.
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_start_tag_process(struct work_struct *work)
+{
+ int res;
+
+ IPADBG("starting TAG process\n");
+ /* close aggregation frames on all pipes */
+ res = ipa_tag_aggr_force_close(-1);
+ if (res)
+ IPAERR("ipa_tag_aggr_force_close failed %d\n", res);
+
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
+
+ IPADBG("TAG process done\n");
+}
+
+/**
+* ipa2_active_clients_log_mod() - Log a modification of the active clients
+* reference count
+*
+* This method logs any modification of the active clients reference count:
+* it records the modification in the circular history buffer and in the
+* hash table - looking up an entry, creating one if needed and deleting
+* one when it is no longer needed.
+*
+* @id: active client logging info struct holding the log information
+* @inc: a boolean indicating whether the modification is an increase
+* or a decrease
+* @int_ctx: a boolean indicating whether this call is being made from
+* an interrupt context and therefore should allocate GFP_ATOMIC memory
+*
+* Method process:
+* - Hash the unique identifier string
+* - Look the hash up in the table
+* 1) If found, increase or decrease the reference count
+* 2) If not found, allocate a new hash table entry struct and initialize it
+* - Remove and deallocate the entry if its count dropped to zero
+* - Log the call in the circular history buffer (unless it is a SIMPLE call)
+*/
+void ipa2_active_clients_log_mod(struct ipa_active_client_logging_info *id,
+ bool inc, bool int_ctx)
+{
+ char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
+ unsigned long long t;
+ unsigned long nanosec_rem;
+ struct ipa2_active_client_htable_entry *hentry;
+ struct ipa2_active_client_htable_entry *hfound;
+ u32 hkey;
+ char str_to_hash[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];
+
+ hfound = NULL;
+ memset(str_to_hash, 0, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ strlcpy(str_to_hash, id->id_string, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ hkey = arch_fast_hash(str_to_hash, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN,
+ 0);
+ hash_for_each_possible(ipa_ctx->ipa2_active_clients_logging.htable,
+ hentry, list, hkey) {
+ if (!strcmp(hentry->id_string, id->id_string)) {
+ hentry->count = hentry->count + (inc ? 1 : -1);
+ hfound = hentry;
+ }
+ }
+ if (hfound == NULL) {
+ hentry = kzalloc(sizeof(
+ struct ipa2_active_client_htable_entry),
+ int_ctx ? GFP_ATOMIC : GFP_KERNEL);
+ if (hentry == NULL) {
+ IPAERR("failed allocating active clients hash entry");
+ return;
+ }
+ hentry->type = id->type;
+ strlcpy(hentry->id_string, id->id_string,
+ IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ INIT_HLIST_NODE(&hentry->list);
+ hentry->count = inc ? 1 : -1;
+ hash_add(ipa_ctx->ipa2_active_clients_logging.htable,
+ &hentry->list, hkey);
+ } else if (hfound->count == 0) {
+ hash_del(&hfound->list);
+ kfree(hfound);
+ }
+
+ if (id->type != SIMPLE) {
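+ /*
+ * Split the raw local_clock() value into seconds and
+ * microseconds for the "[sec.usec]" log prefix: do_div()
+ * leaves the quotient (seconds) in t and returns the
+ * nanosecond remainder.
+ */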
+ t = local_clock();
+ nanosec_rem = do_div(t, 1000000000) / 1000;
+ snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
+ inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
+ "[%5lu.%06lu] v %s, %s: %d",
+ (unsigned long)t, nanosec_rem,
+ id->id_string, id->file, id->line);
+ ipa2_active_clients_log_insert(temp_str);
+ }
+}
+
+void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+ bool int_ctx)
+{
+ ipa2_active_clients_log_mod(id, false, int_ctx);
+}
+
+void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+ bool int_ctx)
+{
+ ipa2_active_clients_log_mod(id, true, int_ctx);
+}
+
+/**
+* ipa2_inc_client_enable_clks() - Increase the active clients counter, and
+* enable IPA clocks if necessary
+*
+* Please do not use this API, use the wrapper macros instead (ipa_i.h)
+* IPA2_ACTIVE_CLIENTS_INC_XXXX();
+*
+* Return codes:
+* None
+*/
+void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+ ipa_active_clients_lock();
+ ipa2_active_clients_log_inc(id, false);
+ ipa_ctx->ipa_active_clients.cnt++;
+ if (ipa_ctx->ipa_active_clients.cnt == 1)
+ ipa_enable_clks();
+ IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
+ ipa_active_clients_unlock();
+}
+
+/**
+* ipa2_inc_client_enable_clks_no_block() - Only increment the number of active
+* clients if no asynchronous actions would be required. Asynchronous actions
+* are locking a mutex and waking up the IPA HW.
+*
+* Please do not use this API, use the wrapper macros instead (ipa_i.h)
+*
+* Return codes: 0 for success
+* -EPERM if an asynchronous action should have been done
+*/
+int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+ *id)
+{
+ int res = 0;
+ unsigned long flags;
+
+ if (ipa_active_clients_trylock(&flags) == 0)
+ return -EPERM;
+
+ if (ipa_ctx->ipa_active_clients.cnt == 0) {
+ res = -EPERM;
+ goto bail;
+ }
+
+ ipa2_active_clients_log_inc(id, true);
+
+ ipa_ctx->ipa_active_clients.cnt++;
+ IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
+bail:
+ ipa_active_clients_trylock_unlock(&flags);
+
+ return res;
+}
+
+/**
+ * ipa2_dec_client_disable_clks() - Decrease the active clients counter
+ *
+ * In case there are no active clients this function also starts the
+ * TAG process. When the TAG process ends, IPA clocks will be gated.
+ * The tag_process_before_gating flag signals that a TAG process must run
+ * before gating, since another client may have sent data to IPA.
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA2_ACTIVE_CLIENTS_DEC_XXXX();
+ *
+ * Return codes:
+ * None
+ */
+void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+ struct ipa_active_client_logging_info log_info;
+
+ ipa_active_clients_lock();
+ ipa2_active_clients_log_dec(id, false);
+ ipa_ctx->ipa_active_clients.cnt--;
+ IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
+ if (ipa_ctx->ipa_active_clients.cnt == 0) {
+ if (ipa_ctx->tag_process_before_gating) {
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
+ "TAG_PROCESS");
+ ipa2_active_clients_log_inc(&log_info, false);
+ ipa_ctx->tag_process_before_gating = false;
+ /*
+ * When TAG process ends, active clients will be
+ * decreased
+ */
+ ipa_ctx->ipa_active_clients.cnt = 1;
+ queue_work(ipa_ctx->power_mgmt_wq, &ipa_tag_work);
+ } else {
+ ipa_disable_clks();
+ }
+ }
+ ipa_active_clients_unlock();
+}
+
+/**
+* ipa_inc_acquire_wakelock() - Increase active clients counter, and
+* acquire wakelock if necessary
+*
+* Return codes:
+* None
+*/
+void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client)
+{
+ unsigned long flags;
+
+ if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
+ return;
+ spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
+ if (ipa_ctx->wakelock_ref_cnt.cnt & (1 << ref_client))
+ IPAERR("client enum %d mask already set. ref cnt = %d\n",
+ ref_client, ipa_ctx->wakelock_ref_cnt.cnt);
+ ipa_ctx->wakelock_ref_cnt.cnt |= (1 << ref_client);
+ if (ipa_ctx->wakelock_ref_cnt.cnt)
+ __pm_stay_awake(&ipa_ctx->w_lock);
+ IPADBG("active wakelock ref cnt = %d client enum %d\n",
+ ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
+ spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+/**
+ * ipa_dec_release_wakelock() - Decrease active clients counter
+ *
+ * If the ref count reaches 0, release the wakelock.
+ *
+ * Return codes:
+ * None
+ */
+void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client)
+{
+ unsigned long flags;
+
+ if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
+ return;
+ spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
+ ipa_ctx->wakelock_ref_cnt.cnt &= ~(1 << ref_client);
+ IPADBG("active wakelock ref cnt = %d client enum %d\n",
+ ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
+ if (ipa_ctx->wakelock_ref_cnt.cnt == 0)
+ __pm_relax(&ipa_ctx->w_lock);
+ spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
+{
+ void *ipa_bam_mmio;
+ int reg_val;
+ int retval = 0;
+
+ ipa_bam_mmio = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST,
+ IPA_BAM_REMAP_SIZE);
+ if (!ipa_bam_mmio)
+ return -ENOMEM;
+ switch (ipa_ctx->ipa_hw_type) {
+ case IPA_HW_v1_1:
+ reg_val = IPA_BAM_CNFG_BITS_VALv1_1;
+ break;
+ case IPA_HW_v2_0:
+ case IPA_HW_v2_5:
+ case IPA_HW_v2_6L:
+ reg_val = IPA_BAM_CNFG_BITS_VALv2_0;
+ break;
+ default:
+ retval = -EPERM;
+ goto fail;
+ }
+ if (ipa_ctx->ipa_hw_type < IPA_HW_v2_5)
+ ipa_write_reg(ipa_bam_mmio, IPA_BAM_CNFG_BITS_OFST, reg_val);
+fail:
+ iounmap(ipa_bam_mmio);
+
+ return retval;
+}
+
+int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+ u32 bandwidth_mbps)
+{
+ enum ipa_voltage_level needed_voltage;
+ u32 clk_rate;
+
+ IPADBG("floor_voltage=%d, bandwidth_mbps=%u",
+ floor_voltage, bandwidth_mbps);
+
+ if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
+ floor_voltage >= IPA_VOLTAGE_MAX) {
+ IPAERR("bad voltage\n");
+ return -EINVAL;
+ }
+
+ if (ipa_ctx->enable_clock_scaling) {
+ IPADBG("Clock scaling is enabled\n");
+ if (bandwidth_mbps >=
+ ipa_ctx->ctrl->clock_scaling_bw_threshold_turbo)
+ needed_voltage = IPA_VOLTAGE_TURBO;
+ else if (bandwidth_mbps >=
+ ipa_ctx->ctrl->clock_scaling_bw_threshold_nominal)
+ needed_voltage = IPA_VOLTAGE_NOMINAL;
+ else
+ needed_voltage = IPA_VOLTAGE_SVS;
+ } else {
+ IPADBG("Clock scaling is disabled\n");
+ needed_voltage = IPA_VOLTAGE_NOMINAL;
+ }
+
+ needed_voltage = max(needed_voltage, floor_voltage);
+ switch (needed_voltage) {
+ case IPA_VOLTAGE_SVS:
+ clk_rate = ipa_ctx->ctrl->ipa_clk_rate_svs;
+ break;
+ case IPA_VOLTAGE_NOMINAL:
+ clk_rate = ipa_ctx->ctrl->ipa_clk_rate_nominal;
+ break;
+ case IPA_VOLTAGE_TURBO:
+ clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo;
+ break;
+ default:
+ IPAERR("bad voltage\n");
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ if (clk_rate == ipa_ctx->curr_ipa_clk_rate) {
+ IPADBG("Same voltage\n");
+ return 0;
+ }
+
+ ipa_active_clients_lock();
+ ipa_ctx->curr_ipa_clk_rate = clk_rate;
+ IPADBG("setting clock rate to %u\n", ipa_ctx->curr_ipa_clk_rate);
+ if (ipa_ctx->ipa_active_clients.cnt > 0) {
+ clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate);
+ if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
+ if (msm_bus_scale_client_update_request(
+ ipa_ctx->ipa_bus_hdl, ipa_get_bus_vote()))
+ WARN_ON(1);
+ } else {
+ IPADBG("clocks are gated, not setting rate\n");
+ }
+ ipa_active_clients_unlock();
+ IPADBG("Done\n");
+ return 0;
+}
+
+static int ipa_init_flt_block(void)
+{
+ int result = 0;
+
+ /*
+ * SW workaround for improper filter behavior when neither global nor
+ * pipe rules are present => always configure a dummy global filter
+ * rule which results in a miss
+ */
+ struct ipa_ioc_add_flt_rule *rules;
+ struct ipa_flt_rule_add *rule;
+ struct ipa_ioc_get_rt_tbl rt_lookup;
+ enum ipa_ip_type ip;
+
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v1_1) {
+ size_t sz = sizeof(struct ipa_ioc_add_flt_rule) +
+ sizeof(struct ipa_flt_rule_add);
+
+ rules = kmalloc(sz, GFP_KERNEL);
+ if (rules == NULL) {
+ IPAERR("fail to alloc mem for dummy filter rule\n");
+ return -ENOMEM;
+ }
+
+ IPADBG("Adding global rules for IPv4 and IPv6");
+ for (ip = IPA_IP_v4; ip < IPA_IP_MAX; ip++) {
+ memset(&rt_lookup, 0,
+ sizeof(struct ipa_ioc_get_rt_tbl));
+ rt_lookup.ip = ip;
+ strlcpy(rt_lookup.name, IPA_DFLT_RT_TBL_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ ipa2_get_rt_tbl(&rt_lookup);
+ ipa2_put_rt_tbl(rt_lookup.hdl);
+
+ memset(rules, 0, sz);
+ rule = &rules->rules[0];
+ rules->commit = 1;
+ rules->ip = ip;
+ rules->global = 1;
+ rules->num_rules = 1;
+ rule->at_rear = 1;
+ if (ip == IPA_IP_v4) {
+ rule->rule.attrib.attrib_mask =
+ IPA_FLT_PROTOCOL | IPA_FLT_DST_ADDR;
+ rule->rule.attrib.u.v4.protocol =
+ IPA_INVALID_L4_PROTOCOL;
+ rule->rule.attrib.u.v4.dst_addr_mask = ~0;
+ rule->rule.attrib.u.v4.dst_addr = ~0;
+ } else if (ip == IPA_IP_v6) {
+ rule->rule.attrib.attrib_mask =
+ IPA_FLT_NEXT_HDR | IPA_FLT_DST_ADDR;
+ rule->rule.attrib.u.v6.next_hdr =
+ IPA_INVALID_L4_PROTOCOL;
+ rule->rule.attrib.u.v6.dst_addr_mask[0] = ~0;
+ rule->rule.attrib.u.v6.dst_addr_mask[1] = ~0;
+ rule->rule.attrib.u.v6.dst_addr_mask[2] = ~0;
+ rule->rule.attrib.u.v6.dst_addr_mask[3] = ~0;
+ rule->rule.attrib.u.v6.dst_addr[0] = ~0;
+ rule->rule.attrib.u.v6.dst_addr[1] = ~0;
+ rule->rule.attrib.u.v6.dst_addr[2] = ~0;
+ rule->rule.attrib.u.v6.dst_addr[3] = ~0;
+ } else {
+ result = -EINVAL;
+ WARN_ON(1);
+ break;
+ }
+ rule->rule.action = IPA_PASS_TO_ROUTING;
+ rule->rule.rt_tbl_hdl = rt_lookup.hdl;
+ rule->rule.retain_hdr = true;
+
+ if (ipa2_add_flt_rule(rules) ||
+ rules->rules[0].status) {
+
+ result = -EINVAL;
+ WARN_ON(1);
+ break;
+ }
+ }
+ kfree(rules);
+ }
+ return result;
+}
+
+static void ipa_sps_process_irq_schedule_rel(void)
+{
+ queue_delayed_work(ipa_ctx->sps_power_mgmt_wq,
+ &ipa_sps_release_resource_work,
+ msecs_to_jiffies(IPA_SPS_PROD_TIMEOUT_MSEC));
+}
+
+/**
+* ipa_suspend_handler() - Handles the suspend interrupt:
+* wakes up the suspended peripheral by requesting its consumer
+* @interrupt: Interrupt type
+* @private_data: The client's private data
+* @interrupt_data: Interrupt specific information data
+*/
+void ipa_suspend_handler(enum ipa_irq_type interrupt,
+ void *private_data,
+ void *interrupt_data)
+{
+ enum ipa_rm_resource_name resource;
+ u32 suspend_data =
+ ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
+ u32 bmsk = 1;
+ u32 i = 0;
+ int res;
+ struct ipa_ep_cfg_holb holb_cfg;
+
+ IPADBG("interrupt=%d, interrupt_data=%u\n", interrupt, suspend_data);
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.tmr_val = 0;
+
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+ if ((suspend_data & bmsk) && (ipa_ctx->ep[i].valid)) {
+ if (IPA_CLIENT_IS_APPS_CONS(ipa_ctx->ep[i].client)) {
+ /*
+ * pipe will be unsuspended as part of
+ * enabling IPA clocks
+ */
+ if (!atomic_read(
+ &ipa_ctx->sps_pm.dec_clients)
+ ) {
+ IPA_ACTIVE_CLIENTS_INC_EP(
+ ipa_ctx->ep[i].client);
+ IPADBG("Pipes un-suspended.\n");
+ IPADBG("Enter poll mode.\n");
+ atomic_set(
+ &ipa_ctx->sps_pm.dec_clients,
+ 1);
+ ipa_sps_process_irq_schedule_rel();
+ }
+ } else {
+ resource = ipa2_get_rm_resource_from_ep(i);
+ res = ipa_rm_request_resource_with_timer(
+ resource);
+ if (res == -EPERM &&
+ IPA_CLIENT_IS_CONS(
+ ipa_ctx->ep[i].client)) {
+ holb_cfg.en = 1;
+ res = ipa2_cfg_ep_holb_by_client(
+ ipa_ctx->ep[i].client, &holb_cfg);
+ if (res) {
+ IPAERR("holb en fail\n");
+ IPAERR("IPAHW stall\n");
+ BUG();
+ }
+ }
+ }
+ }
+ bmsk = bmsk << 1;
+ }
+}
+
+/**
+* ipa2_restore_suspend_handler() - restores the original suspend IRQ handler
+* as it was registered in the IPA init sequence.
+* Return codes:
+* 0: success
+* -EPERM: failed to remove current handler or failed to add original handler
+*/
+int ipa2_restore_suspend_handler(void)
+{
+ int result = 0;
+
+ result = ipa2_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+ if (result) {
+ IPAERR("remove handler for suspend interrupt failed\n");
+ return -EPERM;
+ }
+
+ result = ipa2_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+ ipa_suspend_handler, true, NULL);
+ if (result) {
+ IPAERR("register handler for suspend interrupt failed\n");
+ result = -EPERM;
+ }
+
+ return result;
+}
+
+static int apps_cons_release_resource(void)
+{
+ return 0;
+}
+
+static int apps_cons_request_resource(void)
+{
+ return 0;
+}
+
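+/*
+ * Delayed-work handler that drops the SPS resource vote taken in
+ * ipa_suspend_handler(). If EOT activity was seen since the vote was
+ * taken, the release is re-armed instead, so the clocks stay on while
+ * traffic is still flowing.
+ */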
+static void ipa_sps_release_resource(struct work_struct *work)
+{
+ mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock);
+ /* check whether we still need to decrease the client usage count */
+ if (atomic_read(&ipa_ctx->sps_pm.dec_clients)) {
+ if (atomic_read(&ipa_ctx->sps_pm.eot_activity)) {
+ IPADBG("EOT pending Re-scheduling\n");
+ ipa_sps_process_irq_schedule_rel();
+ } else {
+ atomic_set(&ipa_ctx->sps_pm.dec_clients, 0);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
+ }
+ }
+ atomic_set(&ipa_ctx->sps_pm.eot_activity, 0);
+ mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock);
+}
+
+int ipa_create_apps_resource(void)
+{
+ struct ipa_rm_create_params apps_cons_create_params;
+ struct ipa_rm_perf_profile profile;
+ int result = 0;
+
+ memset(&apps_cons_create_params, 0,
+ sizeof(apps_cons_create_params));
+ apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
+ apps_cons_create_params.request_resource = apps_cons_request_resource;
+ apps_cons_create_params.release_resource = apps_cons_release_resource;
+ result = ipa_rm_create_resource(&apps_cons_create_params);
+ if (result) {
+ IPAERR("ipa_rm_create_resource failed\n");
+ return result;
+ }
+
+ profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+ ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
+
+ return result;
+}
+
+/**
+* ipa_init() - Initialize the IPA driver
+* @resource_p: contains platform-specific values from the DTS file
+* @ipa_dev: The basic device structure representing the IPA driver
+*
+* Function initialization process:
+* - Allocate memory for the driver context data struct
+* - Initialize ipa_ctx with:
+* 1) values parsed from the DTS file
+* 2) parameters passed at module initialization
+* 3) HW values read at runtime (such as core memory size)
+* - Map IPA core registers to CPU memory
+* - Restart the IPA core (HW reset)
+* - Register the IPA BAM with the SPS driver and get a BAM handle
+* - Set the configuration for the IPA BAM via BAM_CNFG_BITS
+* - Initialize the look-aside caches (kmem_cache/slab) for filter,
+* routing and IPA-tree entries
+* - Create a memory pool with 4 objects for DMA operations (each object
+* is 512 bytes long); these objects are used for TX (A5->IPA)
+* - Initialize list heads (routing, filter, hdr, system pipes)
+* - Initialize mutexes (for ipa_ctx and NAT memory)
+* - Initialize spinlocks (for lists related to A5<->IPA pipes)
+* - Initialize 2 single-threaded work-queues named "ipa rx wq" and "ipa tx wq"
+* - Initialize Red-Black Trees for the handles of headers, routing rules,
+* routing tables and filtering rules
+* - Set up all A5<->IPA pipes by calling ipa_setup_apps_pipes
+* - Prepare the descriptors for the system pipes
+* - Initialize the filter block by committing IPv4 and IPv6 default rules
+* - Create an empty routing table in system memory (no commit)
+* - Initialize the pipe memory pool with ipa_pipe_mem_init on supported
+* platforms
+* - Create a char-device for IPA
+* - Initialize IPA RM (resource manager)
+*/
+static int ipa_init(const struct ipa_plat_drv_res *resource_p,
+ struct device *ipa_dev)
+{
+ int result = 0;
+ int i;
+ struct sps_bam_props bam_props = { 0 };
+ struct ipa_flt_tbl *flt_tbl;
+ struct ipa_rt_tbl_set *rset;
+ struct ipa_active_client_logging_info log_info;
+
+ IPADBG("IPA Driver initialization started\n");
+
+ /*
+ * since structure alignment is implementation-dependent, add a
+ * compile-time check to catch different and incompatible data layouts
+ */
+ BUILD_BUG_ON(sizeof(struct ipa_hw_pkt_status) != IPA_PKT_STATUS_SIZE);
+
+ ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
+ if (!ipa_ctx) {
+ IPAERR(":kzalloc err.\n");
+ result = -ENOMEM;
+ goto fail_mem_ctx;
+ }
+
+ ipa_ctx->pdev = ipa_dev;
+ ipa_ctx->uc_pdev = ipa_dev;
+ ipa_ctx->smmu_present = smmu_info.present;
+ if (!ipa_ctx->smmu_present)
+ ipa_ctx->smmu_s1_bypass = true;
+ else
+ ipa_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
+ ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+ ipa_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
+ ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
+ ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
+ ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
+ ipa_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
+ ipa_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
+ ipa_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
+ ipa_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
+ ipa_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
+ ipa_ctx->use_dma_zone = resource_p->use_dma_zone;
+ ipa_ctx->tethered_flow_control = resource_p->tethered_flow_control;
+
+ /* Setting up IPA RX Polling Timeout Seconds */
+ ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec,
+ &ipa_ctx->ipa_rx_max_timeout_usec,
+ resource_p->ipa_rx_polling_sleep_msec);
+
+ /* Setting up ipa polling iteration */
+ if ((resource_p->ipa_polling_iteration >= MIN_POLLING_ITERATION)
+ && (resource_p->ipa_polling_iteration <= MAX_POLLING_ITERATION))
+ ipa_ctx->ipa_polling_iteration =
+ resource_p->ipa_polling_iteration;
+ else
+ ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION;
+
+ /* default aggregation parameters */
+ ipa_ctx->aggregation_type = IPA_MBIM_16;
+ ipa_ctx->aggregation_byte_limit = 1;
+ ipa_ctx->aggregation_time_limit = 0;
+ ipa_ctx->ipa2_active_clients_logging.log_rdy = false;
+
+ ipa_ctx->ctrl = kzalloc(sizeof(*ipa_ctx->ctrl), GFP_KERNEL);
+ if (!ipa_ctx->ctrl) {
+ IPAERR("memory allocation error for ctrl\n");
+ result = -ENOMEM;
+ goto fail_mem_ctrl;
+ }
+ result = ipa_controller_static_bind(ipa_ctx->ctrl,
+ ipa_ctx->ipa_hw_type);
+ if (result) {
+ IPAERR("fail to static bind IPA ctrl.\n");
+ result = -EFAULT;
+ goto fail_bind;
+ }
+
+ IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
+ ipa_ctx->hdr_tbl_lcl, ipa_ctx->ip4_rt_tbl_lcl,
+ ipa_ctx->ip6_rt_tbl_lcl, ipa_ctx->ip4_flt_tbl_lcl,
+ ipa_ctx->ip6_flt_tbl_lcl);
+
+ if (bus_scale_table) {
+ IPADBG("Use bus scaling info from device tree\n");
+ ipa_ctx->ctrl->msm_bus_data_ptr = bus_scale_table;
+ }
+
+ if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) {
+ /* get BUS handle */
+ ipa_ctx->ipa_bus_hdl =
+ msm_bus_scale_register_client(
+ ipa_ctx->ctrl->msm_bus_data_ptr);
+ if (!ipa_ctx->ipa_bus_hdl) {
+ IPAERR("fail to register with bus mgr!\n");
+ result = -ENODEV;
+ goto fail_bus_reg;
+ }
+ } else {
+		IPADBG("Skipping bus scaling registration on virtual platform\n");
+ }
+
+ if (ipa2_active_clients_log_init())
+ goto fail_init_active_client;
+
+ /* get IPA clocks */
+ result = ipa_get_clks(master_dev);
+ if (result)
+ goto fail_clk;
+
+	/* enable clock scaling by default */
+ ipa_ctx->enable_clock_scaling = 1;
+ ipa_ctx->curr_ipa_clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo;
+
+ /* enable IPA clocks explicitly to allow the initialization */
+ ipa_enable_clks();
+
+ /* setup IPA register access */
+ ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base +
+ ipa_ctx->ctrl->ipa_reg_base_ofst,
+ resource_p->ipa_mem_size);
+ if (!ipa_ctx->mmio) {
+ IPAERR(":ipa-base ioremap err.\n");
+ result = -EFAULT;
+ goto fail_remap;
+ }
+
+ result = ipa_init_hw();
+ if (result) {
+ IPAERR(":error initializing HW.\n");
+ result = -ENODEV;
+ goto fail_init_hw;
+ }
+	IPADBG("IPA HW initialization sequence completed\n");
+
+ ipa_ctx->ipa_num_pipes = ipa_get_num_pipes();
+ ipa_ctx->ctrl->ipa_sram_read_settings();
+ IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
+ ipa_ctx->smem_sz, ipa_ctx->smem_restricted_bytes);
+
+ if (ipa_ctx->smem_reqd_sz >
+ ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes) {
+		IPAERR("SW expects more core memory, needed %d, avail %d\n",
+ ipa_ctx->smem_reqd_sz, ipa_ctx->smem_sz -
+ ipa_ctx->smem_restricted_bytes);
+ result = -ENOMEM;
+ goto fail_init_hw;
+ }
+
+ mutex_init(&ipa_ctx->ipa_active_clients.mutex);
+ spin_lock_init(&ipa_ctx->ipa_active_clients.spinlock);
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
+ ipa2_active_clients_log_inc(&log_info, false);
+ ipa_ctx->ipa_active_clients.cnt = 1;
+
+ /* Create workqueues for power management */
+ ipa_ctx->power_mgmt_wq =
+ create_singlethread_workqueue("ipa_power_mgmt");
+ if (!ipa_ctx->power_mgmt_wq) {
+ IPAERR("failed to create power mgmt wq\n");
+ result = -ENOMEM;
+ goto fail_init_hw;
+ }
+
+ ipa_ctx->sps_power_mgmt_wq =
+ create_singlethread_workqueue("sps_ipa_power_mgmt");
+ if (!ipa_ctx->sps_power_mgmt_wq) {
+ IPAERR("failed to create sps power mgmt wq\n");
+ result = -ENOMEM;
+ goto fail_create_sps_wq;
+ }
+
+ /* register IPA with SPS driver */
+ bam_props.phys_addr = resource_p->bam_mem_base;
+ bam_props.virt_size = resource_p->bam_mem_size;
+ bam_props.irq = resource_p->bam_irq;
+ bam_props.num_pipes = ipa_ctx->ipa_num_pipes;
+ bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+ bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+ bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
+ if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
+ bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP;
+ if (ipa_ctx->ipa_bam_remote_mode == true)
+ bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
+ if (!ipa_ctx->smmu_s1_bypass)
+ bam_props.options |= SPS_BAM_SMMU_EN;
+ bam_props.options |= SPS_BAM_CACHED_WP;
+ bam_props.ee = resource_p->ee;
+ bam_props.ipc_loglevel = 3;
+
+ result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
+ if (result) {
+ IPAERR(":bam register err.\n");
+ result = -EPROBE_DEFER;
+ goto fail_register_bam_device;
+ }
+ IPADBG("IPA BAM is registered\n");
+
+ if (ipa_setup_bam_cfg(resource_p)) {
+ IPAERR(":bam cfg err.\n");
+ result = -ENODEV;
+ goto fail_flt_rule_cache;
+ }
+
+ /* init the lookaside cache */
+ ipa_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
+ sizeof(struct ipa_flt_entry), 0, 0, NULL);
+ if (!ipa_ctx->flt_rule_cache) {
+ IPAERR(":ipa flt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_flt_rule_cache;
+ }
+ ipa_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
+ sizeof(struct ipa_rt_entry), 0, 0, NULL);
+ if (!ipa_ctx->rt_rule_cache) {
+ IPAERR(":ipa rt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_rule_cache;
+ }
+ ipa_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
+ sizeof(struct ipa_hdr_entry), 0, 0, NULL);
+ if (!ipa_ctx->hdr_cache) {
+ IPAERR(":ipa hdr cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_cache;
+ }
+ ipa_ctx->hdr_offset_cache =
+ kmem_cache_create("IPA_HDR_OFFSET",
+ sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
+ if (!ipa_ctx->hdr_offset_cache) {
+ IPAERR(":ipa hdr off cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_offset_cache;
+ }
+ ipa_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
+ sizeof(struct ipa_hdr_proc_ctx_entry), 0, 0, NULL);
+ if (!ipa_ctx->hdr_proc_ctx_cache) {
+ IPAERR(":ipa hdr proc ctx cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_proc_ctx_cache;
+ }
+ ipa_ctx->hdr_proc_ctx_offset_cache =
+ kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
+ sizeof(struct ipa_hdr_proc_ctx_offset_entry), 0, 0, NULL);
+ if (!ipa_ctx->hdr_proc_ctx_offset_cache) {
+ IPAERR(":ipa hdr proc ctx off cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_proc_ctx_offset_cache;
+ }
+ ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
+ sizeof(struct ipa_rt_tbl), 0, 0, NULL);
+ if (!ipa_ctx->rt_tbl_cache) {
+ IPAERR(":ipa rt tbl cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_tbl_cache;
+ }
+ ipa_ctx->tx_pkt_wrapper_cache =
+ kmem_cache_create("IPA_TX_PKT_WRAPPER",
+ sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa_ctx->tx_pkt_wrapper_cache) {
+ IPAERR(":ipa tx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_tx_pkt_wrapper_cache;
+ }
+ ipa_ctx->rx_pkt_wrapper_cache =
+ kmem_cache_create("IPA_RX_PKT_WRAPPER",
+ sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa_ctx->rx_pkt_wrapper_cache) {
+ IPAERR(":ipa rx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rx_pkt_wrapper_cache;
+ }
+
+ /* Setup DMA pool */
+ ipa_ctx->dma_pool = dma_pool_create("ipa_tx", ipa_ctx->pdev,
+ IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
+ 0, 0);
+ if (!ipa_ctx->dma_pool) {
+ IPAERR("cannot alloc DMA pool.\n");
+ result = -ENOMEM;
+ goto fail_dma_pool;
+ }
+
+ ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+ ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+
+ /* init the various list heads */
+ INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list);
+ INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list);
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list);
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]);
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]);
+ }
+ INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
+ for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+ INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
+ INIT_LIST_HEAD(&ipa_ctx->
+ hdr_proc_ctx_tbl.head_free_offset_list[i]);
+ }
+ INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+ INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+ flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+
+ flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+ }
+
+ rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+ rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+ INIT_LIST_HEAD(&ipa_ctx->intf_list);
+ INIT_LIST_HEAD(&ipa_ctx->msg_list);
+ INIT_LIST_HEAD(&ipa_ctx->pull_msg_list);
+ init_waitqueue_head(&ipa_ctx->msg_waitq);
+ mutex_init(&ipa_ctx->msg_lock);
+
+ mutex_init(&ipa_ctx->lock);
+ mutex_init(&ipa_ctx->nat_mem.lock);
+
+ idr_init(&ipa_ctx->ipa_idr);
+ spin_lock_init(&ipa_ctx->idr_lock);
+
+ /* wlan related member */
+ memset(&ipa_ctx->wc_memb, 0, sizeof(ipa_ctx->wc_memb));
+ spin_lock_init(&ipa_ctx->wc_memb.wlan_spinlock);
+ spin_lock_init(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+ INIT_LIST_HEAD(&ipa_ctx->wc_memb.wlan_comm_desc_list);
+ /*
+ * setup an empty routing table in system memory, this will be used
+ * to delete a routing table cleanly and safely
+ */
+ ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;
+
+ ipa_ctx->empty_rt_tbl_mem.base =
+ dma_alloc_coherent(ipa_ctx->pdev,
+ ipa_ctx->empty_rt_tbl_mem.size,
+ &ipa_ctx->empty_rt_tbl_mem.phys_base,
+ GFP_KERNEL);
+ if (!ipa_ctx->empty_rt_tbl_mem.base) {
+ IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
+ ipa_ctx->empty_rt_tbl_mem.size);
+ result = -ENOMEM;
+ goto fail_apps_pipes;
+ }
+ memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
+ ipa_ctx->empty_rt_tbl_mem.size);
+	IPADBG("empty routing table was allocated in system memory\n");
+
+ /* setup the A5-IPA pipes */
+ if (ipa_setup_apps_pipes()) {
+ IPAERR(":failed to setup IPA-Apps pipes.\n");
+ result = -ENODEV;
+ goto fail_empty_rt_tbl;
+ }
+ IPADBG("IPA System2Bam pipes were connected\n");
+
+ if (ipa_init_flt_block()) {
+ IPAERR("fail to setup dummy filter rules\n");
+ result = -ENODEV;
+ goto fail_empty_rt_tbl;
+ }
+	IPADBG("filter block was set with dummy filter rules\n");
+
+ /* setup the IPA pipe mem pool */
+ if (resource_p->ipa_pipe_mem_size)
+ ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+ resource_p->ipa_pipe_mem_size);
+
+	ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+	if (IS_ERR(ipa_ctx->class)) {
+		IPAERR(":class_create err.\n");
+		result = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+ result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME);
+ if (result) {
+ IPAERR("alloc_chrdev_region err.\n");
+ result = -ENODEV;
+ goto fail_alloc_chrdev_region;
+ }
+
+ ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num,
+ ipa_ctx, DRV_NAME);
+ if (IS_ERR(ipa_ctx->dev)) {
+ IPAERR(":device_create err.\n");
+ result = -ENODEV;
+ goto fail_device_create;
+ }
+
+ cdev_init(&ipa_ctx->cdev, &ipa_drv_fops);
+ ipa_ctx->cdev.owner = THIS_MODULE;
+ ipa_ctx->cdev.ops = &ipa_drv_fops; /* from LDD3 */
+
+ result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1);
+ if (result) {
+ IPAERR(":cdev_add err=%d\n", -result);
+ result = -ENODEV;
+ goto fail_cdev_add;
+ }
+	IPADBG("ipa cdev added successfully. major:%d minor:%d\n",
+ MAJOR(ipa_ctx->dev_num),
+ MINOR(ipa_ctx->dev_num));
+
+ if (create_nat_device()) {
+ IPAERR("unable to create nat device\n");
+ result = -ENODEV;
+ goto fail_nat_dev_add;
+ }
+
+ /* Create a wakeup source. */
+ wakeup_source_init(&ipa_ctx->w_lock, "IPA_WS");
+ spin_lock_init(&ipa_ctx->wakelock_ref_cnt.spinlock);
+
+ /* Initialize the SPS PM lock. */
+ mutex_init(&ipa_ctx->sps_pm.sps_pm_lock);
+
+ /* Initialize IPA RM (resource manager) */
+ result = ipa_rm_initialize();
+ if (result) {
+ IPAERR("RM initialization failed (%d)\n", -result);
+ result = -ENODEV;
+ goto fail_ipa_rm_init;
+ }
+	IPADBG("IPA resource manager initialized\n");
+
+ result = ipa_create_apps_resource();
+ if (result) {
+ IPAERR("Failed to create APPS_CONS resource\n");
+ result = -ENODEV;
+ goto fail_create_apps_resource;
+ }
+
+ /*register IPA IRQ handler*/
+ result = ipa_interrupts_init(resource_p->ipa_irq, 0,
+ master_dev);
+ if (result) {
+ IPAERR("ipa interrupts initialization failed\n");
+ result = -ENODEV;
+ goto fail_ipa_interrupts_init;
+ }
+
+ /*add handler for suspend interrupt*/
+ result = ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+ ipa_suspend_handler, false, NULL);
+ if (result) {
+ IPAERR("register handler for suspend interrupt failed\n");
+ result = -ENODEV;
+ goto fail_add_interrupt_handler;
+ }
+
+ if (ipa_ctx->use_ipa_teth_bridge) {
+ /* Initialize the tethering bridge driver */
+ result = teth_bridge_driver_init();
+ if (result) {
+ IPAERR(":teth_bridge init failed (%d)\n", -result);
+ result = -ENODEV;
+ goto fail_add_interrupt_handler;
+ }
+		IPADBG("teth_bridge initialized\n");
+ }
+
+ ipa_debugfs_init();
+
+ result = ipa_uc_interface_init();
+ if (result)
+		IPAERR(":ipa uC interface init failed (%d)\n", -result);
+	else
+		IPADBG(":ipa uC interface init ok\n");
+
+ result = ipa_wdi_init();
+ if (result)
+ IPAERR(":wdi init failed (%d)\n", -result);
+ else
+ IPADBG(":wdi init ok\n");
+
+ result = ipa_ntn_init();
+ if (result)
+ IPAERR(":ntn init failed (%d)\n", -result);
+ else
+ IPADBG(":ntn init ok\n");
+
+ ipa_ctx->q6_proxy_clk_vote_valid = true;
+
+ ipa_register_panic_hdlr();
+
+ pr_info("IPA driver initialization was successful.\n");
+
+ return 0;
+
+fail_add_interrupt_handler:
+ free_irq(resource_p->ipa_irq, master_dev);
+fail_ipa_interrupts_init:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+fail_create_apps_resource:
+ ipa_rm_exit();
+fail_ipa_rm_init:
+fail_nat_dev_add:
+ cdev_del(&ipa_ctx->cdev);
+fail_cdev_add:
+ device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
+fail_device_create:
+ unregister_chrdev_region(ipa_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+ if (ipa_ctx->pipe_mem_pool)
+ gen_pool_destroy(ipa_ctx->pipe_mem_pool);
+fail_empty_rt_tbl:
+ ipa_teardown_apps_pipes();
+ dma_free_coherent(ipa_ctx->pdev,
+ ipa_ctx->empty_rt_tbl_mem.size,
+ ipa_ctx->empty_rt_tbl_mem.base,
+ ipa_ctx->empty_rt_tbl_mem.phys_base);
+fail_apps_pipes:
+ idr_destroy(&ipa_ctx->ipa_idr);
+fail_dma_pool:
+ kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_offset_cache);
+fail_hdr_proc_ctx_offset_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_cache);
+fail_hdr_proc_ctx_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_cache);
+fail_hdr_cache:
+ kmem_cache_destroy(ipa_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+ kmem_cache_destroy(ipa_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+ sps_deregister_bam_device(ipa_ctx->bam_handle);
+fail_register_bam_device:
+ destroy_workqueue(ipa_ctx->sps_power_mgmt_wq);
+fail_create_sps_wq:
+ destroy_workqueue(ipa_ctx->power_mgmt_wq);
+fail_init_hw:
+ iounmap(ipa_ctx->mmio);
+fail_remap:
+ ipa_disable_clks();
+fail_clk:
+ ipa2_active_clients_log_destroy();
+fail_init_active_client:
+ msm_bus_scale_unregister_client(ipa_ctx->ipa_bus_hdl);
+fail_bus_reg:
+ if (bus_scale_table) {
+ msm_bus_cl_clear_pdata(bus_scale_table);
+ bus_scale_table = NULL;
+ }
+fail_bind:
+ kfree(ipa_ctx->ctrl);
+fail_mem_ctrl:
+ kfree(ipa_ctx);
+ ipa_ctx = NULL;
+fail_mem_ctx:
+ return result;
+}
+
+static int get_ipa_dts_configuration(struct platform_device *pdev,
+ struct ipa_plat_drv_res *ipa_drv_res)
+{
+ int result;
+ struct resource *resource;
+
+ /* initialize ipa_res */
+ ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+ ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+ ipa_drv_res->ipa_hw_type = 0;
+ ipa_drv_res->ipa_hw_mode = 0;
+ ipa_drv_res->ipa_bam_remote_mode = false;
+ ipa_drv_res->modem_cfg_emb_pipe_flt = false;
+ ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+ ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+
+ smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
+ "qcom,smmu-disable-htw");
+
+ /* Get IPA HW Version */
+ result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
+ &ipa_drv_res->ipa_hw_type);
+ if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
+ IPAERR(":get resource failed for ipa-hw-ver!\n");
+ return -ENODEV;
+ }
+	IPADBG(": ipa_hw_type = %d\n", ipa_drv_res->ipa_hw_type);
+
+ /* Get IPA HW mode */
+ result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
+ &ipa_drv_res->ipa_hw_mode);
+ if (result)
+ IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
+ else
+		IPADBG(": found ipa_drv_res->ipa_hw_mode = %d\n",
+ ipa_drv_res->ipa_hw_mode);
+
+ /* Get IPA WAN / LAN RX pool sizes */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,wan-rx-ring-size",
+ &ipa_drv_res->wan_rx_ring_size);
+ if (result)
+ IPADBG("using default for wan-rx-ring-size = %u\n",
+ ipa_drv_res->wan_rx_ring_size);
+ else
+		IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u\n",
+ ipa_drv_res->wan_rx_ring_size);
+
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,lan-rx-ring-size",
+ &ipa_drv_res->lan_rx_ring_size);
+ if (result)
+ IPADBG("using default for lan-rx-ring-size = %u\n",
+ ipa_drv_res->lan_rx_ring_size);
+ else
+		IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u\n",
+ ipa_drv_res->lan_rx_ring_size);
+
+ ipa_drv_res->use_ipa_teth_bridge =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,use-ipa-tethering-bridge");
+	IPADBG(": using tethering bridge driver = %s\n",
+ ipa_drv_res->use_ipa_teth_bridge
+ ? "True" : "False");
+
+ ipa_drv_res->ipa_bam_remote_mode =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-bam-remote-mode");
+ IPADBG(": ipa bam remote mode = %s\n",
+ ipa_drv_res->ipa_bam_remote_mode
+ ? "True" : "False");
+
+ ipa_drv_res->modem_cfg_emb_pipe_flt =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,modem-cfg-emb-pipe-flt");
+ IPADBG(": modem configure embedded pipe filtering = %s\n",
+ ipa_drv_res->modem_cfg_emb_pipe_flt
+ ? "True" : "False");
+
+ ipa_drv_res->skip_uc_pipe_reset =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,skip-uc-pipe-reset");
+ IPADBG(": skip uC pipe reset = %s\n",
+ ipa_drv_res->skip_uc_pipe_reset
+ ? "True" : "False");
+
+ ipa_drv_res->use_dma_zone =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,use-dma-zone");
+ IPADBG(": use dma zone = %s\n",
+ ipa_drv_res->use_dma_zone
+ ? "True" : "False");
+
+ ipa_drv_res->tethered_flow_control =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,tethered-flow-control");
+ IPADBG(": Use apps based flow control = %s\n",
+ ipa_drv_res->tethered_flow_control
+ ? "True" : "False");
+
+ /* Get IPA wrapper address */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ipa-base");
+ if (!resource) {
+ IPAERR(":get resource failed for ipa-base!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->ipa_mem_base = resource->start;
+ ipa_drv_res->ipa_mem_size = resource_size(resource);
+ IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
+ ipa_drv_res->ipa_mem_base,
+ ipa_drv_res->ipa_mem_size);
+
+ smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+ smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
+ /* Get IPA BAM address */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "bam-base");
+ if (!resource) {
+ IPAERR(":get resource failed for bam-base!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->bam_mem_base = resource->start;
+ ipa_drv_res->bam_mem_size = resource_size(resource);
+ IPADBG(": bam-base = 0x%x, size = 0x%x\n",
+ ipa_drv_res->bam_mem_base,
+ ipa_drv_res->bam_mem_size);
+
+ /* Get IPA pipe mem start ofst */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ipa-pipe-mem");
+ if (!resource) {
+		IPADBG(":not using pipe memory - resource does not exist\n");
+ } else {
+ ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
+ ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
+ IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
+ ipa_drv_res->ipa_pipe_mem_start_ofst,
+ ipa_drv_res->ipa_pipe_mem_size);
+ }
+
+ /* Get IPA IRQ number */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "ipa-irq");
+ if (!resource) {
+ IPAERR(":get resource failed for ipa-irq!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->ipa_irq = resource->start;
+ IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
+
+ /* Get IPA BAM IRQ number */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "bam-irq");
+ if (!resource) {
+ IPAERR(":get resource failed for bam-irq!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->bam_irq = resource->start;
+	IPADBG(":bam-irq = %d\n", ipa_drv_res->bam_irq);
+
+ result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
+ &ipa_drv_res->ee);
+ if (result)
+ ipa_drv_res->ee = 0;
+
+	/* Get IPA RX polling sleep time (milliseconds) */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,rx-polling-sleep-ms",
+ &ipa_drv_res->ipa_rx_polling_sleep_msec);
+
+ if (result) {
+ ipa_drv_res->ipa_rx_polling_sleep_msec = ONE_MSEC;
+		IPADBG("using default polling timeout of 1 msec\n");
+ } else {
+		IPADBG(": found ipa_drv_res->ipa_rx_polling_sleep_msec = %d\n",
+ ipa_drv_res->ipa_rx_polling_sleep_msec);
+ }
+
+ /* Get IPA Polling Iteration */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,ipa-polling-iteration",
+ &ipa_drv_res->ipa_polling_iteration);
+ if (result) {
+ ipa_drv_res->ipa_polling_iteration = MAX_POLLING_ITERATION;
+ IPADBG("using default polling iteration\n");
+ } else {
+		IPADBG(": found ipa_drv_res->ipa_polling_iteration = %d\n",
+ ipa_drv_res->ipa_polling_iteration);
+ }
+
+ return 0;
+}
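+
+/*
+ * For reference, a minimal device tree node that satisfies the parser
+ * above. All addresses, sizes, interrupt specifiers and version numbers
+ * below are placeholders rather than values for any particular target:
+ *
+ *	ipa_hw: qcom,ipa@680000 {
+ *		compatible = "qcom,ipa";
+ *		reg = <0x680000 0x4000>, <0x684000 0x15000>;
+ *		reg-names = "ipa-base", "bam-base";
+ *		interrupts = <0 333 0>, <0 432 0>;
+ *		interrupt-names = "ipa-irq", "bam-irq";
+ *		qcom,ipa-hw-ver = <5>;
+ *		qcom,ee = <0>;
+ *	};
+ */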
+
+static int ipa_smmu_wlan_cb_probe(struct device *dev)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
+ int disable_htw = 1;
+ int atomic_ctx = 1;
+ int fast = 1;
+ int bypass = 1;
+ int ret;
+
+ IPADBG("sub pdev=%p\n", dev);
+
+ cb->dev = dev;
+ cb->iommu = iommu_domain_alloc(msm_iommu_get_bus(dev));
+ if (!cb->iommu) {
+ IPAERR("could not alloc iommu domain\n");
+ /* assume this failure is because iommu driver is not ready */
+ return -EPROBE_DEFER;
+ }
+ cb->valid = true;
+
+ if (smmu_info.disable_htw) {
+ ret = iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+ &disable_htw);
+ if (ret) {
+ IPAERR("couldn't disable coherent HTW\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ }
+
+ if (smmu_info.s1_bypass) {
+ if (iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_S1_BYPASS,
+ &bypass)) {
+ IPAERR("couldn't set bypass\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU S1 BYPASS\n");
+ } else {
+ if (iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx)) {
+ IPAERR("couldn't set domain as atomic\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU atomic set\n");
+ if (smmu_info.fast_map) {
+ if (iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
+ }
+
+ ret = iommu_attach_device(cb->iommu, dev);
+ if (ret) {
+ IPAERR("could not attach device ret=%d\n", ret);
+ cb->valid = false;
+ return ret;
+ }
+
+ if (!smmu_info.s1_bypass) {
+ IPAERR("map IPA region to WLAN_CB IOMMU\n");
+ ret = ipa_iommu_map(cb->iommu,
+ rounddown(smmu_info.ipa_base, PAGE_SIZE),
+ rounddown(smmu_info.ipa_base, PAGE_SIZE),
+ roundup(smmu_info.ipa_size, PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ if (ret) {
+ IPAERR("map IPA to WLAN_CB IOMMU failed ret=%d\n",
+ ret);
+ arm_iommu_detach_device(cb->dev);
+ cb->valid = false;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ipa_smmu_uc_cb_probe(struct device *dev)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
+ int disable_htw = 1;
+ int atomic_ctx = 1;
+ int ret;
+ int fast = 1;
+ int bypass = 1;
+ u32 iova_ap_mapping[2];
+
+ IPADBG("UC CB PROBE sub pdev=%p\n", dev);
+
+ ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+ iova_ap_mapping, 2);
+ if (ret) {
+ IPAERR("Fail to read UC start/size iova addresses\n");
+ return ret;
+ }
+ cb->va_start = iova_ap_mapping[0];
+ cb->va_size = iova_ap_mapping[1];
+ cb->va_end = cb->va_start + cb->va_size;
+	IPADBG("UC va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+ IPAERR("DMA set mask failed\n");
+ return -EOPNOTSUPP;
+ }
+
+ IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
+
+ cb->dev = dev;
+ cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+ cb->va_start, cb->va_size);
+ if (IS_ERR_OR_NULL(cb->mapping)) {
+ IPADBG("Fail to create mapping\n");
+ /* assume this failure is because iommu driver is not ready */
+ return -EPROBE_DEFER;
+ }
+ IPADBG("SMMU mapping created\n");
+ cb->valid = true;
+
+ IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
+ if (smmu_info.disable_htw) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+ &disable_htw)) {
+ IPAERR("couldn't disable coherent HTW\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ }
+
+ IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
+ if (smmu_info.s1_bypass) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &bypass)) {
+ IPAERR("couldn't set bypass\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU S1 BYPASS\n");
+ } else {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx)) {
+ IPAERR("couldn't set domain as atomic\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU atomic set\n");
+ if (smmu_info.fast_map) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
+ }
+
+ IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
+ ret = arm_iommu_attach_device(cb->dev, cb->mapping);
+ if (ret) {
+ IPAERR("could not attach device ret=%d\n", ret);
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return ret;
+ }
+
+ cb->next_addr = cb->va_end;
+ ipa_ctx->uc_pdev = dev;
+
+ IPADBG("UC CB PROBE pdev=%p attached\n", dev);
+ return 0;
+}
+
+static int ipa_smmu_ap_cb_probe(struct device *dev)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
+ int result;
+ int disable_htw = 1;
+ int atomic_ctx = 1;
+ int fast = 1;
+ int bypass = 1;
+ u32 iova_ap_mapping[2];
+
+ IPADBG("AP CB probe: sub pdev=%p\n", dev);
+
+ result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+ iova_ap_mapping, 2);
+ if (result) {
+ IPAERR("Fail to read AP start/size iova addresses\n");
+ return result;
+ }
+ cb->va_start = iova_ap_mapping[0];
+ cb->va_size = iova_ap_mapping[1];
+ cb->va_end = cb->va_start + cb->va_size;
+	IPADBG("AP va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+ IPAERR("DMA set mask failed\n");
+ return -EOPNOTSUPP;
+ }
+
+ cb->dev = dev;
+ cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+ cb->va_start,
+ cb->va_size);
+ if (IS_ERR_OR_NULL(cb->mapping)) {
+ IPADBG("Fail to create mapping\n");
+ /* assume this failure is because iommu driver is not ready */
+ return -EPROBE_DEFER;
+ }
+ IPADBG("SMMU mapping created\n");
+ cb->valid = true;
+
+ if (smmu_info.disable_htw) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+ &disable_htw)) {
+ IPAERR("couldn't disable coherent HTW\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU disable HTW\n");
+ }
+
+ if (smmu_info.s1_bypass) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &bypass)) {
+ IPAERR("couldn't set bypass\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU S1 BYPASS\n");
+ } else {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx)) {
+ IPAERR("couldn't set domain as atomic\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU atomic set\n");
+
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
+
+ result = arm_iommu_attach_device(cb->dev, cb->mapping);
+ if (result) {
+ IPAERR("couldn't attach to IOMMU ret=%d\n", result);
+ cb->valid = false;
+ return result;
+ }
+
+ if (!smmu_info.s1_bypass) {
+ IPAERR("map IPA region to AP_CB IOMMU\n");
+ result = ipa_iommu_map(cb->mapping->domain,
+ rounddown(smmu_info.ipa_base, PAGE_SIZE),
+ rounddown(smmu_info.ipa_base, PAGE_SIZE),
+ roundup(smmu_info.ipa_size, PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ if (result) {
+ IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
+ result);
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return result;
+ }
+ }
+
+ smmu_info.present = true;
+
+ if (!bus_scale_table)
+ bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev);
+
+ /* Proceed to real initialization */
+ result = ipa_init(&ipa_res, dev);
+ if (result) {
+ IPAERR("ipa_init failed\n");
+ arm_iommu_detach_device(cb->dev);
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return result;
+ }
+
+ return result;
+}
+
+int ipa_plat_drv_probe(struct platform_device *pdev_p,
+ struct ipa_api_controller *api_ctrl,
+ const struct of_device_id *pdrv_match)
+{
+ int result;
+ struct device *dev = &pdev_p->dev;
+
+ IPADBG("IPA driver probing started\n");
+
+ if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
+ return ipa_smmu_ap_cb_probe(dev);
+
+ if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
+ return ipa_smmu_wlan_cb_probe(dev);
+
+ if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
+ return ipa_smmu_uc_cb_probe(dev);
+
+ master_dev = dev;
+ if (!ipa_pdev)
+ ipa_pdev = pdev_p;
+
+ result = get_ipa_dts_configuration(pdev_p, &ipa_res);
+ if (result) {
+ IPAERR("IPA dts parsing failed\n");
+ return result;
+ }
+
+ result = ipa2_bind_api_controller(ipa_res.ipa_hw_type, api_ctrl);
+ if (result) {
+ IPAERR("IPA API binding failed\n");
+ return result;
+ }
+
+ if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
+ if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,smmu-s1-bypass"))
+ smmu_info.s1_bypass = true;
+ if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,smmu-fast-map"))
+ smmu_info.fast_map = true;
+ smmu_info.arm_smmu = true;
+ pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
+ smmu_info.s1_bypass, smmu_info.fast_map);
+ result = of_platform_populate(pdev_p->dev.of_node,
+ pdrv_match, NULL, &pdev_p->dev);
+ } else if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,msm-smmu")) {
+ IPAERR("Legacy IOMMU not supported\n");
+ result = -EOPNOTSUPP;
+ } else {
+ if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(&pdev_p->dev,
+ DMA_BIT_MASK(32))) {
+ IPAERR("DMA set mask failed\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!bus_scale_table)
+ bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
+
+ /* Proceed to real initialization */
+ result = ipa_init(&ipa_res, dev);
+ if (result) {
+ IPAERR("ipa_init failed\n");
+ return result;
+ }
+ }
+
+ return result;
+}
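+
+/*
+ * The pdrv_match table is supplied by the caller; a sketch of the shape
+ * such a table is expected to have, based on the compatible strings
+ * checked above (the table name is illustrative only):
+ *
+ *	static const struct of_device_id ipa_plat_drv_match[] = {
+ *		{ .compatible = "qcom,ipa", },
+ *		{ .compatible = "qcom,ipa-smmu-ap-cb", },
+ *		{ .compatible = "qcom,ipa-smmu-wlan-cb", },
+ *		{ .compatible = "qcom,ipa-smmu-uc-cb", },
+ *		{},
+ *	};
+ */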
+
+/**
+ * ipa2_ap_suspend() - suspend callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP suspend
+ * operation is invoked, e.g. when the system enters suspend.
+ *
+ * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
+ * This will postpone the suspend operation until IPA is no longer used by AP.
+*/
+int ipa2_ap_suspend(struct device *dev)
+{
+ int i;
+
+ IPADBG("Enter...\n");
+
+ /* In case there is a tx/rx handler in polling mode fail to suspend */
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+ if (ipa_ctx->ep[i].sys &&
+ atomic_read(&ipa_ctx->ep[i].sys->curr_polling_state)) {
+ IPAERR("EP %d is in polling state, do not suspend\n",
+ i);
+ return -EAGAIN;
+ }
+ }
+
+ /* release SPS IPA resource without waiting for inactivity timer */
+ atomic_set(&ipa_ctx->sps_pm.eot_activity, 0);
+ ipa_sps_release_resource(NULL);
+ IPADBG("Exit\n");
+
+ return 0;
+}
+
+/**
+* ipa2_ap_resume() - resume callback for runtime_pm
+* @dev: pointer to device
+*
+* This callback will be invoked by the runtime_pm framework when an AP resume
+* operation is invoked.
+*
+* Always returns 0 since resume should always succeed.
+*/
+int ipa2_ap_resume(struct device *dev)
+{
+ return 0;
+}
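+
+/*
+ * These callbacks are wired into the platform driver's dev_pm_ops by the
+ * IPA API layer; a minimal sketch of one plausible wiring (the struct
+ * name and field choice here are illustrative):
+ *
+ *	static const struct dev_pm_ops ipa_pm_ops = {
+ *		.suspend_noirq = ipa2_ap_suspend,
+ *		.resume_noirq = ipa2_ap_resume,
+ *	};
+ */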
+
+struct ipa_context *ipa_get_ctx(void)
+{
+ return ipa_ctx;
+}
+
+int ipa_iommu_map(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+ struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx();
+ struct ipa_smmu_cb_ctx *uc_cb = ipa2_get_uc_smmu_ctx();
+
+	IPADBG("domain=0x%p iova 0x%lx\n", domain, iova);
+	IPADBG("paddr=0x%pa size 0x%x\n", &paddr, (u32)size);
+
+ /* make sure no overlapping */
+ if (domain == ipa2_get_smmu_domain()) {
+ if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+ IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+ ipa_assert();
+ return -EFAULT;
+ }
+ } else if (domain == ipa2_get_wlan_smmu_domain()) {
+ /* wlan is one time map */
+ } else if (domain == ipa2_get_uc_smmu_domain()) {
+ if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+ IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+ ipa_assert();
+ return -EFAULT;
+ }
+ } else {
+ IPAERR("Unexpected domain 0x%p\n", domain);
+ ipa_assert();
+ return -EFAULT;
+ }
+
+ return iommu_map(domain, iova, paddr, size, prot);
+}
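+
+/*
+ * Callers of ipa_iommu_map() map whole pages, so a physical region is
+ * rounded out to page boundaries first; a sketch of the usual call
+ * pattern (base and size are placeholders):
+ *
+ *	unsigned long start = rounddown(base, PAGE_SIZE);
+ *	size_t len = roundup(size + base - start, PAGE_SIZE);
+ *
+ *	ret = ipa_iommu_map(ipa2_get_smmu_domain(), start, start, len,
+ *			    IOMMU_READ | IOMMU_WRITE);
+ */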
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
+
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
new file mode 100644
index 0000000..fd37395
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
@@ -0,0 +1,897 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <asm/barrier.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include "ipa_i.h"
+
+/*
+ * These values were determined empirically and show good E2E
+ * bi-directional throughput
+ */
+#define IPA_HOLB_TMR_EN 0x1
+#define IPA_HOLB_TMR_DIS 0x0
+#define IPA_HOLB_TMR_DEFAULT_VAL 0x1ff
+
+#define IPA_PKT_FLUSH_TO_US 100
+
+int ipa_enable_data_path(u32 clnt_hdl)
+{
+ struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];
+ struct ipa_ep_cfg_holb holb_cfg;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ int res = 0;
+
+ IPADBG("Enabling data path\n");
+	/* On IPA 2.0 and later, disable HOLB */
+ if ((ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) &&
+ IPA_CLIENT_IS_CONS(ep->client)) {
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.en = IPA_HOLB_TMR_DIS;
+ holb_cfg.tmr_val = 0;
+ res = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg);
+ }
+
+ /* Enable the pipe */
+ if (IPA_CLIENT_IS_CONS(ep->client) &&
+ (ep->keep_ipa_awake ||
+ ipa_ctx->resume_on_connect[ep->client] ||
+ !ipa_should_pipe_be_suspended(ep->client))) {
+ memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ }
+
+ return res;
+}
+
+int ipa_disable_data_path(u32 clnt_hdl)
+{
+ struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];
+ struct ipa_ep_cfg_holb holb_cfg;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ u32 aggr_init;
+ int res = 0;
+
+ IPADBG("Disabling data path\n");
+	/* On IPA 2.0 and later, enable HOLB to prevent IPA from stalling */
+ if ((ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) &&
+ IPA_CLIENT_IS_CONS(ep->client)) {
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.en = IPA_HOLB_TMR_EN;
+ holb_cfg.tmr_val = 0;
+ res = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg);
+ }
+
+ /* Suspend the pipe */
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ }
+
+ udelay(IPA_PKT_FLUSH_TO_US);
+ aggr_init = ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_AGGR_N_OFST_v2_0(clnt_hdl));
+ if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
+ IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) == IPA_ENABLE_AGGR) {
+ res = ipa_tag_aggr_force_close(clnt_hdl);
+ if (res) {
+ IPAERR("tag process timeout, client:%d err:%d\n",
+ clnt_hdl, res);
+ BUG();
+ }
+ }
+
+ return res;
+}
+
+static int ipa2_smmu_map_peer_bam(unsigned long dev)
+{
+ phys_addr_t base;
+ u32 size;
+ struct iommu_domain *smmu_domain;
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
+
+ if (!ipa_ctx->smmu_s1_bypass) {
+ if (ipa_ctx->peer_bam_map_cnt == 0) {
+ if (sps_get_bam_addr(dev, &base, &size)) {
+ IPAERR("Fail to get addr\n");
+ return -EINVAL;
+ }
+ smmu_domain = ipa2_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ if (ipa_iommu_map(smmu_domain,
+ cb->va_end,
+ rounddown(base, PAGE_SIZE),
+ roundup(size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE |
+ IOMMU_DEVICE)) {
+ IPAERR("Fail to ipa_iommu_map\n");
+ return -EINVAL;
+ }
+ }
+
+ ipa_ctx->peer_bam_iova = cb->va_end;
+ ipa_ctx->peer_bam_pa = base;
+ ipa_ctx->peer_bam_map_size = size;
+ ipa_ctx->peer_bam_dev = dev;
+
+ IPADBG("Peer bam %lu mapped\n", dev);
+ } else {
+ WARN_ON(dev != ipa_ctx->peer_bam_dev);
+ }
+
+ ipa_ctx->peer_bam_map_cnt++;
+ }
+
+ return 0;
+}
+
+static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
+ struct ipa_ep_context *ep, int ipa_ep_idx)
+{
+ int result = -EFAULT;
+
+ /* Default Config */
+ ep->ep_hdl = sps_alloc_endpoint();
+
+ if (ipa2_smmu_map_peer_bam(in->client_bam_hdl)) {
+ IPAERR("fail to iommu map peer BAM.\n");
+ return -EFAULT;
+ }
+
+ if (ep->ep_hdl == NULL) {
+		IPAERR("SPS EP alloc failed.\n");
+ return -EFAULT;
+ }
+
+ result = sps_get_config(ep->ep_hdl,
+ &ep->connect);
+ if (result) {
+ IPAERR("fail to get config.\n");
+ return -EFAULT;
+ }
+
+ /* Specific Config */
+ if (IPA_CLIENT_IS_CONS(in->client)) {
+ ep->connect.mode = SPS_MODE_SRC;
+ ep->connect.destination =
+ in->client_bam_hdl;
+ ep->connect.dest_iova = ipa_ctx->peer_bam_iova;
+ ep->connect.source = ipa_ctx->bam_handle;
+ ep->connect.dest_pipe_index =
+ in->client_ep_idx;
+ ep->connect.src_pipe_index = ipa_ep_idx;
+ } else {
+ ep->connect.mode = SPS_MODE_DEST;
+ ep->connect.source = in->client_bam_hdl;
+ ep->connect.source_iova = ipa_ctx->peer_bam_iova;
+ ep->connect.destination = ipa_ctx->bam_handle;
+ ep->connect.src_pipe_index = in->client_ep_idx;
+ ep->connect.dest_pipe_index = ipa_ep_idx;
+ }
+
+ return 0;
+}
+
+static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
+ struct sps_mem_buffer *mem_buff_ptr,
+ bool *fifo_in_pipe_mem_ptr,
+ u32 *fifo_pipe_mem_ofst_ptr,
+ u32 fifo_size, int ipa_ep_idx)
+{
+ dma_addr_t dma_addr;
+ u32 ofst;
+ int result = -EFAULT;
+ struct iommu_domain *smmu_domain;
+
+ mem_buff_ptr->size = fifo_size;
+ if (in->pipe_mem_preferred) {
+ if (ipa_pipe_mem_alloc(&ofst, fifo_size)) {
+ IPAERR("FIFO pipe mem alloc fail ep %u\n",
+ ipa_ep_idx);
+ mem_buff_ptr->base =
+ dma_alloc_coherent(ipa_ctx->pdev,
+ mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ } else {
+ memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+ result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+ fifo_size, 1);
+ WARN_ON(result);
+ *fifo_in_pipe_mem_ptr = 1;
+ dma_addr = mem_buff_ptr->phys_base;
+ *fifo_pipe_mem_ofst_ptr = ofst;
+ }
+ } else {
+ mem_buff_ptr->base =
+ dma_alloc_coherent(ipa_ctx->pdev, mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ }
+ if (ipa_ctx->smmu_s1_bypass) {
+ mem_buff_ptr->phys_base = dma_addr;
+ } else {
+ mem_buff_ptr->iova = dma_addr;
+ smmu_domain = ipa2_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ mem_buff_ptr->phys_base =
+ iommu_iova_to_phys(smmu_domain, dma_addr);
+ }
+ }
+ if (mem_buff_ptr->base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa2_connect() - low-level IPA client connect
+ * @in: [in] input parameters from client
+ * @sps: [out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl: [out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode; these peripherals are USB and HSIC. This API
+ * expects the caller to take responsibility for adding any needed headers,
+ * routing and filtering tables and rules.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_connect(const struct ipa_connect_params *in,
+ struct ipa_sps_params *sps, u32 *clnt_hdl)
+{
+ int ipa_ep_idx;
+ int result = -EFAULT;
+ struct ipa_ep_context *ep;
+ struct ipa_ep_cfg_status ep_status;
+ unsigned long base;
+ struct iommu_domain *smmu_domain;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ IPADBG("connecting client\n");
+
+ if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+ in->client >= IPA_CLIENT_MAX ||
+ in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa2_get_ep_mapping(in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to alloc EP.\n");
+ goto fail;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ if (ep->valid) {
+ IPAERR("EP already allocated.\n");
+ goto fail;
+ }
+
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+ IPA_ACTIVE_CLIENTS_INC_EP(in->client);
+
+ ep->skip_ep_cfg = in->skip_ep_cfg;
+ ep->valid = 1;
+ ep->client = in->client;
+ ep->client_notify = in->notify;
+ ep->priv = in->priv;
+ ep->keep_ipa_awake = in->keep_ipa_awake;
+
+ /* Notify uc to start monitoring holb on USB BAM Producer pipe. */
+ if (IPA_CLIENT_IS_USB_CONS(in->client)) {
+ ipa_uc_monitor_holb(in->client, true);
+		IPADBG("Enabling holb monitor for client:%d\n", in->client);
+ }
+
+ result = ipa_enable_data_path(ipa_ep_idx);
+ if (result) {
+ IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx);
+ goto ipa_cfg_ep_fail;
+ }
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa2_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+ /* Setting EP status 0 */
+ memset(&ep_status, 0, sizeof(ep_status));
+ if (ipa2_cfg_ep_status(ipa_ep_idx, &ep_status)) {
+ IPAERR("fail to configure status of EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+ IPADBG("ep configuration successful\n");
+ } else {
+ IPADBG("Skipping endpoint configuration.\n");
+ }
+
+ result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to configure SPS.\n");
+ goto ipa_cfg_ep_fail;
+ }
+
+ if (!ipa_ctx->smmu_s1_bypass &&
+ (in->desc.base == NULL ||
+ in->data.base == NULL)) {
+		IPAERR("client must allocate FIFOs when SMMU S1 is enabled data_fifo=0x%p desc_fifo=0x%p\n",
+ in->data.base, in->desc.base);
+ goto desc_mem_alloc_fail;
+ }
+
+ if (in->desc.base == NULL) {
+ result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
+ &ep->desc_fifo_in_pipe_mem,
+ &ep->desc_fifo_pipe_mem_ofst,
+ in->desc_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DESC FIFO.\n");
+ goto desc_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DESC FIFO\n");
+ ep->connect.desc = in->desc;
+ ep->desc_fifo_client_allocated = 1;
+ }
+ IPADBG("Descriptor FIFO pa=%pa, size=%d\n", &ep->connect.desc.phys_base,
+ ep->connect.desc.size);
+
+ if (in->data.base == NULL) {
+ result = ipa_connect_allocate_fifo(in, &ep->connect.data,
+ &ep->data_fifo_in_pipe_mem,
+ &ep->data_fifo_pipe_mem_ofst,
+ in->data_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DATA FIFO.\n");
+ goto data_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DATA FIFO\n");
+ ep->connect.data = in->data;
+ ep->data_fifo_client_allocated = 1;
+ }
+ IPADBG("Data FIFO pa=%pa, size=%d\n", &ep->connect.data.phys_base,
+ ep->connect.data.size);
+
+ if (!ipa_ctx->smmu_s1_bypass) {
+ ep->connect.data.iova = ep->connect.data.phys_base;
+ base = ep->connect.data.iova;
+ smmu_domain = ipa2_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ if (ipa_iommu_map(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.data.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE)) {
+ IPAERR("Fail to ipa_iommu_map data FIFO\n");
+ goto iommu_map_data_fail;
+ }
+ }
+ ep->connect.desc.iova = ep->connect.desc.phys_base;
+ base = ep->connect.desc.iova;
+ if (smmu_domain != NULL) {
+ if (ipa_iommu_map(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.desc.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE)) {
+ IPAERR("Fail to ipa_iommu_map desc FIFO\n");
+ goto iommu_map_desc_fail;
+ }
+ }
+ }
+
+ if ((ipa_ctx->ipa_hw_type == IPA_HW_v2_0 ||
+ ipa_ctx->ipa_hw_type == IPA_HW_v2_5 ||
+ ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) &&
+ IPA_CLIENT_IS_USB_CONS(in->client))
+ ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD;
+ else
+ ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+ ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */
+
+ result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, in->client);
+ if (result) {
+ IPAERR("sps_connect fails.\n");
+ goto sps_connect_fail;
+ }
+
+ sps->ipa_bam_hdl = ipa_ctx->bam_handle;
+ sps->ipa_ep_idx = ipa_ep_idx;
+ *clnt_hdl = ipa_ep_idx;
+ memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+ memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+ ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->client))
+ ipa_install_dflt_flt_rules(ipa_ep_idx);
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+
+ IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
+
+ return 0;
+
+sps_connect_fail:
+ if (!ipa_ctx->smmu_s1_bypass) {
+ base = ep->connect.desc.iova;
+ smmu_domain = ipa2_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ iommu_unmap(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.desc.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE));
+ }
+ }
+iommu_map_desc_fail:
+ if (!ipa_ctx->smmu_s1_bypass) {
+ base = ep->connect.data.iova;
+ smmu_domain = ipa2_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ iommu_unmap(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.data.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE));
+ }
+ }
+iommu_map_data_fail:
+ if (!ep->data_fifo_client_allocated) {
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(ipa_ctx->pdev,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+ }
+data_mem_alloc_fail:
+ if (!ep->desc_fifo_client_allocated) {
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(ipa_ctx->pdev,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+ }
+desc_mem_alloc_fail:
+ sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+fail:
+ return result;
+}
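+
+/*
+ * A sketch of the sequence a peripheral driver follows around
+ * ipa2_connect()/ipa2_disconnect(); the client choice, FIFO sizes and the
+ * usb_* handles below are placeholders:
+ *
+ *	struct ipa_connect_params in = { };
+ *	struct ipa_sps_params sps;
+ *	u32 hdl;
+ *
+ *	in.client = IPA_CLIENT_USB_PROD;
+ *	in.client_bam_hdl = usb_bam_hdl;
+ *	in.client_ep_idx = usb_pipe_idx;
+ *	in.desc_fifo_sz = 0x800;
+ *	in.data_fifo_sz = 0x2000;
+ *	if (ipa2_connect(&in, &sps, &hdl))
+ *		return -EFAULT;
+ *
+ * The client then completes its own sps_connect() using sps.ipa_bam_hdl,
+ * sps.desc and sps.data, and later tears the pipe down with
+ * ipa2_disconnect(hdl).
+ */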
+
+static int ipa2_smmu_unmap_peer_bam(unsigned long dev)
+{
+ size_t len;
+ struct iommu_domain *smmu_domain;
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
+
+ if (!ipa_ctx->smmu_s1_bypass) {
+ WARN_ON(dev != ipa_ctx->peer_bam_dev);
+ ipa_ctx->peer_bam_map_cnt--;
+ if (ipa_ctx->peer_bam_map_cnt == 0) {
+ len = roundup(ipa_ctx->peer_bam_map_size +
+ ipa_ctx->peer_bam_pa -
+ rounddown(ipa_ctx->peer_bam_pa,
+ PAGE_SIZE), PAGE_SIZE);
+ smmu_domain = ipa2_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ if (iommu_unmap(smmu_domain,
+ cb->va_end, len) != len) {
+ IPAERR("Fail to iommu_unmap\n");
+ return -EINVAL;
+ }
+ IPADBG("Peer bam %lu unmapped\n", dev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ipa2_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take
+ * responsibility for freeing any headers, routing and filtering tables and
+ * rules that are no longer needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_disconnect(u32 clnt_hdl)
+{
+ int result;
+ struct ipa_ep_context *ep;
+ unsigned long peer_bam;
+ unsigned long base;
+ struct iommu_domain *smmu_domain;
+ struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
+ int res;
+ enum ipa_client_type client_type;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+ client_type = ipa2_get_client_mapping(clnt_hdl);
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+	/* For the USB 2.0 controller the EP is disabled first,
+	 * so this sequence is not needed again when disconnecting the pipe.
+ */
+ if (!ep->ep_disabled) {
+ /* Set Disconnect in Progress flag. */
+ spin_lock(&ipa_ctx->disconnect_lock);
+ ep->disconnect_in_progress = true;
+ spin_unlock(&ipa_ctx->disconnect_lock);
+
+ /* Notify uc to stop monitoring holb on USB BAM
+ * Producer pipe.
+ */
+ if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
+ ipa_uc_monitor_holb(ep->client, false);
+ IPADBG("Disabling holb monitor for client: %d\n",
+ ep->client);
+ }
+
+ result = ipa_disable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n",
+ result, clnt_hdl);
+ return -EPERM;
+ }
+ }
+
+ result = sps_disconnect(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS disconnect failed.\n");
+ return -EPERM;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ peer_bam = ep->connect.destination;
+ else
+ peer_bam = ep->connect.source;
+
+ if (ipa2_smmu_unmap_peer_bam(peer_bam)) {
+ IPAERR("fail to iommu unmap peer BAM.\n");
+ return -EPERM;
+ }
+
+ if (!ep->desc_fifo_client_allocated &&
+ ep->connect.desc.base) {
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(ipa_ctx->pdev,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+ }
+
+ if (!ep->data_fifo_client_allocated &&
+ ep->connect.data.base) {
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(ipa_ctx->pdev,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+ }
+
+ if (!ipa_ctx->smmu_s1_bypass) {
+ base = ep->connect.desc.iova;
+ smmu_domain = ipa2_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ iommu_unmap(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.desc.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE));
+ }
+ }
+
+ if (!ipa_ctx->smmu_s1_bypass) {
+ base = ep->connect.data.iova;
+ smmu_domain = ipa2_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ iommu_unmap(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.data.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE));
+ }
+ }
+
+ result = sps_free_endpoint(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS de-alloc EP failed.\n");
+ return -EPERM;
+ }
+
+ ipa_delete_dflt_flt_rules(clnt_hdl);
+
+ /* If APPS flow control is not enabled, send a message to modem to
+ * enable flow control honoring.
+ */
+ if (!ipa_ctx->tethered_flow_control && ep->qmi_request_sent) {
+		/* Send a message to modem to re-enable flow control honoring. */
+ req.request_id = clnt_hdl;
+ res = qmi_disable_force_clear_datapath_send(&req);
+ if (res) {
+ IPADBG("disable_force_clear_datapath failed %d\n",
+ res);
+ }
+ }
+
+ spin_lock(&ipa_ctx->disconnect_lock);
+ memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+ spin_unlock(&ipa_ctx->disconnect_lock);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+ return 0;
+}
+
+/**
+* ipa2_reset_endpoint() - reset an endpoint from BAM perspective
+* @clnt_hdl: [in] IPA client handle
+*
+* Returns: 0 on success, negative on failure
+*
+* Note: Should not be called from atomic context
+*/
+int ipa2_reset_endpoint(u32 clnt_hdl)
+{
+ int res;
+ struct ipa_ep_context *ep;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes) {
+ IPAERR("Bad parameters.\n");
+ return -EFAULT;
+ }
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ res = sps_disconnect(ep->ep_hdl);
+ if (res) {
+ IPAERR("sps_disconnect() failed, res=%d.\n", res);
+ goto bail;
+ } else {
+ res = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect,
+ ep->client);
+ if (res) {
+ IPAERR("sps_connect() failed, res=%d.\n", res);
+ goto bail;
+ }
+ }
+
+bail:
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return res;
+}
+
+/**
+ * ipa2_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before
+ * client disconnect.
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to remove
+ * the ep delay on an IPA consumer pipe before disconnect in BAM-BAM mode.
+ * This API expects the caller to take responsibility for freeing any
+ * needed headers, routing and filtering tables and rules.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_clear_endpoint_delay(u32 clnt_hdl)
+{
+ struct ipa_ep_context *ep;
+ struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+ struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0};
+ int res;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ if (!ipa_ctx->tethered_flow_control) {
+ IPADBG("APPS flow control is not enabled\n");
+ /* Send a message to modem to disable flow control honoring. */
+ req.request_id = clnt_hdl;
+ req.source_pipe_bitmask = 1 << clnt_hdl;
+ res = qmi_enable_force_clear_datapath_send(&req);
+ if (res) {
+ IPADBG("enable_force_clear_datapath failed %d\n",
+ res);
+ }
+ ep->qmi_request_sent = true;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ /* Set disconnect in progress flag so further flow control events are
+ * not honored.
+ */
+ spin_lock(&ipa_ctx->disconnect_lock);
+ ep->disconnect_in_progress = true;
+ spin_unlock(&ipa_ctx->disconnect_lock);
+
+ /* If flow is disabled at this point, restore the ep state.*/
+ ep_ctrl.ipa_ep_delay = false;
+ ep_ctrl.ipa_ep_suspend = false;
+ ipa2_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
+
+ return 0;
+}
+
+/**
+ * ipa2_disable_endpoint() - low-level IPA client disable endpoint
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to
+ * disable the pipe from IPA in BAM-BAM mode.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_disable_endpoint(u32 clnt_hdl)
+{
+ int result;
+ struct ipa_ep_context *ep;
+ enum ipa_client_type client_type;
+ unsigned long bam;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+ client_type = ipa2_get_client_mapping(clnt_hdl);
+ IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+ /* Set Disconnect in Progress flag. */
+ spin_lock(&ipa_ctx->disconnect_lock);
+ ep->disconnect_in_progress = true;
+ spin_unlock(&ipa_ctx->disconnect_lock);
+
+ /* Notify uc to stop monitoring holb on USB BAM Producer pipe. */
+ if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
+ ipa_uc_monitor_holb(ep->client, false);
+ IPADBG("Disabling holb monitor for client: %d\n", ep->client);
+ }
+
+ result = ipa_disable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ clnt_hdl);
+ goto fail;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ bam = ep->connect.source;
+ else
+ bam = ep->connect.destination;
+
+ result = sps_pipe_reset(bam, clnt_hdl);
+ if (result) {
+ IPAERR("SPS pipe reset failed.\n");
+ goto fail;
+ }
+
+ ep->ep_disabled = true;
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+ IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+ return 0;
+
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+ return -EPERM;
+}
+
+
+/**
+ * ipa_sps_connect_safe() - connect endpoint from BAM perspective
+ * @h: [in] sps pipe handle
+ * @connect: [in] sps connect parameters
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * This function connects a BAM pipe using the SPS driver sps_connect()
+ * API. By first requesting the uC interface to reset the pipe, it works
+ * around an IPA HW limitation that does not allow resetting a BAM pipe
+ * while traffic is pending in the IPA TX command queue.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+ enum ipa_client_type ipa_client)
+{
+ int res;
+
+ if (ipa_ctx->ipa_hw_type > IPA_HW_v2_5 || ipa_ctx->skip_uc_pipe_reset) {
+ IPADBG("uC pipe reset is not required\n");
+ } else {
+ res = ipa_uc_reset_pipe(ipa_client);
+ if (res)
+ return res;
+ }
+ return sps_connect(h, connect);
+}
+EXPORT_SYMBOL(ipa_sps_connect_safe);
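+
+/*
+ * Illustrative call (a sketch; "pipe" and "connection" are assumed
+ * caller-owned SPS objects, and the client type is only an example):
+ *
+ *	res = ipa_sps_connect_safe(pipe, &connection, IPA_CLIENT_USB_CONS);
+ *	if (res)
+ *		IPAERR("sps connect failed %d\n", res);
+ *
+ * On HW newer than IPA v2.5, or with qcom,skip-uc-pipe-reset set, this
+ * reduces to a plain sps_connect().
+ */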
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
new file mode 100644
index 0000000..a8266c8
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -0,0 +1,2147 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_MAX_MSG_LEN 4096
+#define IPA_DBG_CNTR_ON 127265
+#define IPA_DBG_CNTR_OFF 127264
+#define IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE ((IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN \
+ * IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) \
+ + IPA_MAX_MSG_LEN)
+
+#define RX_MIN_POLL_CNT "Rx Min Poll Count"
+#define RX_MAX_POLL_CNT "Rx Max Poll Count"
+#define MAX_COUNT_LENGTH 6
+#define MAX_POLLING_ITERATION 40
+#define MIN_POLLING_ITERATION 1
+
+#define IPA_DUMP_STATUS_FIELD(f) \
+ pr_err(#f "=0x%x\n", status->f)
+
+const char *ipa_excp_name[] = {
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
+};
+
+const char *ipa_status_excp_name[] = {
+ __stringify_1(IPA_EXCP_DEAGGR),
+ __stringify_1(IPA_EXCP_REPLICATION),
+ __stringify_1(IPA_EXCP_IP),
+ __stringify_1(IPA_EXCP_IHL),
+ __stringify_1(IPA_EXCP_FRAG_MISS),
+ __stringify_1(IPA_EXCP_SW),
+ __stringify_1(IPA_EXCP_NAT),
+ __stringify_1(IPA_EXCP_NONE),
+};
+
+const char *ipa_event_name[] = {
+ __stringify(WLAN_CLIENT_CONNECT),
+ __stringify(WLAN_CLIENT_DISCONNECT),
+ __stringify(WLAN_CLIENT_POWER_SAVE_MODE),
+ __stringify(WLAN_CLIENT_NORMAL_MODE),
+ __stringify(SW_ROUTING_ENABLE),
+ __stringify(SW_ROUTING_DISABLE),
+ __stringify(WLAN_AP_CONNECT),
+ __stringify(WLAN_AP_DISCONNECT),
+ __stringify(WLAN_STA_CONNECT),
+ __stringify(WLAN_STA_DISCONNECT),
+ __stringify(WLAN_CLIENT_CONNECT_EX),
+ __stringify(WLAN_SWITCH_TO_SCC),
+ __stringify(WLAN_SWITCH_TO_MCC),
+ __stringify(WLAN_WDI_ENABLE),
+ __stringify(WLAN_WDI_DISABLE),
+ __stringify(WAN_UPSTREAM_ROUTE_ADD),
+ __stringify(WAN_UPSTREAM_ROUTE_DEL),
+ __stringify(WAN_EMBMS_CONNECT),
+ __stringify(WAN_XLAT_CONNECT),
+ __stringify(ECM_CONNECT),
+ __stringify(ECM_DISCONNECT),
+ __stringify(IPA_TETHERING_STATS_UPDATE_STATS),
+ __stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS),
+};
+
+const char *ipa_hdr_l2_type_name[] = {
+ __stringify(IPA_HDR_L2_NONE),
+ __stringify(IPA_HDR_L2_ETHERNET_II),
+ __stringify(IPA_HDR_L2_802_3),
+};
+
+const char *ipa_hdr_proc_type_name[] = {
+ __stringify(IPA_HDR_PROC_NONE),
+ __stringify(IPA_HDR_PROC_ETHII_TO_ETHII),
+ __stringify(IPA_HDR_PROC_ETHII_TO_802_3),
+ __stringify(IPA_HDR_PROC_802_3_TO_ETHII),
+ __stringify(IPA_HDR_PROC_802_3_TO_802_3),
+};
+
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_keep_awake;
+static struct dentry *dfile_ep_holb;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_proc_ctx;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip6_flt;
+static struct dentry *dfile_stats;
+static struct dentry *dfile_wstats;
+static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
+static struct dentry *dfile_dbg_cnt;
+static struct dentry *dfile_msg;
+static struct dentry *dfile_ip4_nat;
+static struct dentry *dfile_rm_stats;
+static struct dentry *dfile_status_stats;
+static struct dentry *dfile_active_clients;
+static struct dentry *dfile_ipa_rx_poll_timeout;
+static struct dentry *dfile_ipa_poll_iteration;
+
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static char *active_clients_buf;
+static s8 ep_reg_idx;
+
+int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
+{
+ return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_VERSION=0x%x\n"
+ "IPA_COMP_HW_VERSION=0x%x\n"
+ "IPA_ROUTE=0x%x\n"
+ "IPA_FILTER=0x%x\n"
+ "IPA_SHARED_MEM_SIZE=0x%x\n",
+ ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1),
+ ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v1_1),
+ ipa_read_reg(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v1_1));
+}
+
+int _ipa_read_gen_reg_v2_0(char *buff, int max_len)
+{
+ return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_VERSION=0x%x\n"
+ "IPA_COMP_HW_VERSION=0x%x\n"
+ "IPA_ROUTE=0x%x\n"
+ "IPA_FILTER=0x%x\n"
+ "IPA_SHARED_MEM_RESTRICTED=0x%x\n"
+ "IPA_SHARED_MEM_SIZE=0x%x\n",
+ ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1),
+ ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v1_1),
+ ipa_read_reg_field(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0),
+ ipa_read_reg_field(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0));
+}
+
+static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ nbytes = ipa_ctx->ctrl->ipa_read_gen_reg(dbg_buff, IPA_MAX_MSG_LEN);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_write_ep_holb(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ipa_ep_cfg_holb holb;
+ u32 en;
+ u32 tmr_val;
+ u32 ep_idx;
+ unsigned long missing;
+ char *sptr, *token;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+
+ sptr = dbg_buff;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &ep_idx))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &en))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &tmr_val))
+ return -EINVAL;
+
+ holb.en = en;
+ holb.tmr_val = tmr_val;
+
+ ipa2_cfg_ep_holb(ep_idx, &holb);
+
+ return count;
+}
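+
+/*
+ * The holb write handler above parses three space-separated integers,
+ * "<ep_idx> <en> <tmr_val>". Example (default debugfs mount point
+ * assumed):
+ *
+ *	echo "5 1 10" > /sys/kernel/debug/ipa/holb
+ *
+ * enables HOLB drop on pipe 5 with a timer value of 10.
+ */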
+
+static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ /* signed compare: a negative value selects all pipes on read */
+ if (option >= (int)ipa_ctx->ipa_num_pipes) {
+ IPAERR("bad pipe specified %d\n", option);
+ return count;
+ }
+
+ ep_reg_idx = option;
+
+ return count;
+}
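+
+/*
+ * ep_reg selects which pipe the ep_reg read dumps; a negative value
+ * selects all pipes (see ipa_read_ep_reg() below). Example, assuming
+ * the default debugfs mount point:
+ *
+ *	echo 3 > /sys/kernel/debug/ipa/ep_reg
+ *	echo -1 > /sys/kernel/debug/ipa/ep_reg
+ */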
+
+int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe)
+{
+ return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_ENDP_INIT_NAT_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_%u=0x%x\n"
+ "IPA_ENDP_INIT_MODE_%u=0x%x\n"
+ "IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_ROUTE_%u=0x%x\n"
+ "IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n",
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_NAT_N_OFST_v1_1(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_CTRL_N_OFST(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe))
+ );
+}
+
+int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe)
+{
+ return scnprintf(
+ dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_ENDP_INIT_NAT_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
+ "IPA_ENDP_INIT_MODE_%u=0x%x\n"
+ "IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_ROUTE_%u=0x%x\n"
+ "IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
+ "IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_CFG_%u=0x%x\n",
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_NAT_N_OFST_v2_0(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_CTRL_N_OFST(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(pipe)),
+ pipe, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_CFG_n_OFST(pipe)));
+}
+
+static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int i;
+ int start_idx;
+ int end_idx;
+ int size = 0;
+ int ret;
+ loff_t pos;
+
+ /* negative ep_reg_idx means all registers */
+ if (ep_reg_idx < 0) {
+ start_idx = 0;
+ end_idx = ipa_ctx->ipa_num_pipes;
+ } else {
+ start_idx = ep_reg_idx;
+ end_idx = start_idx + 1;
+ }
+ pos = *ppos;
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ for (i = start_idx; i < end_idx; i++) {
+
+ nbytes = ipa_ctx->ctrl->ipa_read_ep_reg(dbg_buff,
+ IPA_MAX_MSG_LEN, i);
+
+ *ppos = pos;
+ ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+ nbytes);
+ if (ret < 0) {
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return ret;
+ }
+
+ size += ret;
+ ubuf += ret;
+ count -= ret;
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ *ppos = pos + size;
+ return size;
+}
+
+static ssize_t ipa_write_keep_awake(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ if (option == 1)
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ else if (option == 0)
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ else
+ return -EFAULT;
+
+ return count;
+}
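+
+/*
+ * keep_awake takes a manual vote against IPA power collapse; every "1"
+ * written must eventually be balanced by a "0" or the clocks stay on.
+ * Example (default debugfs mount point assumed):
+ *
+ *	echo 1 > /sys/kernel/debug/ipa/keep_awake
+ *	cat /sys/kernel/debug/ipa/keep_awake
+ *	echo 0 > /sys/kernel/debug/ipa/keep_awake
+ */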
+
+static ssize_t ipa_read_keep_awake(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+
+ ipa_active_clients_lock();
+ if (ipa_ctx->ipa_active_clients.cnt)
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA APPS power state is ON\n");
+ else
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA APPS power state is OFF\n");
+ ipa_active_clients_unlock();
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int i = 0;
+ struct ipa_hdr_entry *entry;
+
+ mutex_lock(&ipa_ctx->lock);
+
+ if (ipa_ctx->hdr_tbl_lcl)
+ pr_err("Table resides on local memory\n");
+ else
+ pr_err("Table resides on system (ddr) memory\n");
+
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ nbytes = scnprintf(
+ dbg_buff,
+ IPA_MAX_MSG_LEN,
+ "name:%s len=%d ref=%d partial=%d type=%s ",
+ entry->name,
+ entry->hdr_len,
+ entry->ref_cnt,
+ entry->is_partial,
+ ipa_hdr_l2_type_name[entry->type]);
+
+ if (entry->is_hdr_proc_ctx) {
+ nbytes += scnprintf(
+ dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "phys_base=0x%pa ",
+ &entry->phys_base);
+ } else {
+ nbytes += scnprintf(
+ dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "ofst=%u ",
+ entry->offset_entry->offset >> 2);
+ }
+ for (i = 0; i < entry->hdr_len; i++) {
+ scnprintf(dbg_buff + nbytes + i * 2,
+ IPA_MAX_MSG_LEN - nbytes - i * 2,
+ "%02x", entry->hdr[i]);
+ }
+ scnprintf(dbg_buff + nbytes + entry->hdr_len * 2,
+ IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2,
+ "\n");
+ pr_err("%s", dbg_buff);
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+
+static int ipa_attrib_dump(struct ipa_rule_attrib *attrib,
+ enum ipa_ip_type ip)
+{
+ uint32_t addr[4];
+ uint32_t mask[4];
+ int i;
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+ pr_err("tos_value:%d ", attrib->tos_value);
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+ pr_err("tos_mask:%d ", attrib->tos_mask);
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL)
+ pr_err("protocol:%d ", attrib->u.v4.protocol);
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.src_addr);
+ mask[0] = htonl(attrib->u.v4.src_addr_mask);
+ pr_err(
+ "src_addr:%pI4 src_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.src_addr[i]);
+ mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+ }
+ pr_err(
+ "src_addr:%pI6 src_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.dst_addr);
+ mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+ pr_err(
+ "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+ mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+ }
+ pr_err(
+ "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ pr_err("src_port_range:%u %u ",
+ attrib->src_port_lo,
+ attrib->src_port_hi);
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ pr_err("dst_port_range:%u %u ",
+ attrib->dst_port_lo,
+ attrib->dst_port_hi);
+ }
+ if (attrib->attrib_mask & IPA_FLT_TYPE)
+ pr_err("type:%d ", attrib->type);
+
+ if (attrib->attrib_mask & IPA_FLT_CODE)
+ pr_err("code:%d ", attrib->code);
+
+ if (attrib->attrib_mask & IPA_FLT_SPI)
+ pr_err("spi:%x ", attrib->spi);
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT)
+ pr_err("src_port:%u ", attrib->src_port);
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT)
+ pr_err("dst_port:%u ", attrib->dst_port);
+
+ if (attrib->attrib_mask & IPA_FLT_TC)
+ pr_err("tc:%d ", attrib->u.v6.tc);
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL)
+ pr_err("flow_label:%x ", attrib->u.v6.flow_label);
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR)
+ pr_err("next_hdr:%d ", attrib->u.v6.next_hdr);
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ pr_err(
+ "metadata:%x metadata_mask:%x",
+ attrib->meta_data, attrib->meta_data_mask);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+ pr_err("frg ");
+
+ if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
+ (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) {
+ pr_err("src_mac_addr:%pM ", attrib->src_mac_addr);
+ }
+
+ if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
+ (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) {
+ pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
+ pr_err("ether_type:%x ", attrib->ether_type);
+
+ pr_err("\n");
+ return 0;
+}
+
+static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
+{
+ uint8_t addr[16];
+ uint8_t mask[16];
+ int i;
+ int j;
+
+ if (attrib->tos_eq_present)
+ pr_err("tos_value:%d ", attrib->tos_eq);
+
+ if (attrib->protocol_eq_present)
+ pr_err("protocol:%d ", attrib->protocol_eq);
+
+ for (i = 0; i < attrib->num_ihl_offset_range_16; i++) {
+ pr_err(
+ "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
+ attrib->ihl_offset_range_16[i].offset,
+ attrib->ihl_offset_range_16[i].range_low,
+ attrib->ihl_offset_range_16[i].range_high);
+ }
+
+ for (i = 0; i < attrib->num_offset_meq_32; i++) {
+ pr_err(
+ "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
+ attrib->offset_meq_32[i].offset,
+ attrib->offset_meq_32[i].mask,
+ attrib->offset_meq_32[i].value);
+ }
+
+ if (attrib->tc_eq_present)
+ pr_err("tc:%d ", attrib->tc_eq);
+
+ if (attrib->fl_eq_present)
+ pr_err("flow_label:%d ", attrib->fl_eq);
+
+ if (attrib->ihl_offset_eq_16_present) {
+ pr_err(
+ "(ihl_ofst_eq16:%d val:0x%x) ",
+ attrib->ihl_offset_eq_16.offset,
+ attrib->ihl_offset_eq_16.value);
+ }
+
+ for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) {
+ pr_err(
+ "(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
+ attrib->ihl_offset_meq_32[i].offset,
+ attrib->ihl_offset_meq_32[i].mask,
+ attrib->ihl_offset_meq_32[i].value);
+ }
+
+ for (i = 0; i < attrib->num_offset_meq_128; i++) {
+ for (j = 0; j < 16; j++) {
+ addr[j] = attrib->offset_meq_128[i].value[j];
+ mask[j] = attrib->offset_meq_128[i].mask[j];
+ }
+ pr_err(
+ "(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ",
+ attrib->offset_meq_128[i].offset,
+ mask + 0,
+ addr + 0);
+ }
+
+ if (attrib->metadata_meq32_present) {
+ pr_err(
+ "(metadata: ofst:%u mask:0x%x val:0x%x) ",
+ attrib->metadata_meq32.offset,
+ attrib->metadata_meq32.mask,
+ attrib->metadata_meq32.value);
+ }
+
+ if (attrib->ipv4_frag_eq_present)
+ pr_err("frg ");
+
+ pr_err("\n");
+ return 0;
+}
+
+static int ipa_open_dbg(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t ipa_read_rt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int i = 0;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_rt_tbl_set *set;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ u32 ofst;
+ u32 ofst_words;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+
+ mutex_lock(&ipa_ctx->lock);
+
+ if (ip == IPA_IP_v6) {
+ if (ipa_ctx->ip6_rt_tbl_lcl)
+ pr_err("Table resides on local memory\n");
+ else
+ pr_err("Table resides on system (ddr) memory\n");
+ } else if (ip == IPA_IP_v4) {
+ if (ipa_ctx->ip4_rt_tbl_lcl)
+ pr_err("Table resides on local memory\n");
+ else
+ pr_err("Table resides on system (ddr) memory\n");
+ }
+
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ if (entry->proc_ctx) {
+ ofst = entry->proc_ctx->offset_entry->offset;
+ ofst_words =
+ (ofst +
+ ipa_ctx->hdr_proc_ctx_tbl.start_offset)
+ >> 5;
+
+ pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+ entry->tbl->idx, entry->tbl->name,
+ entry->tbl->ref_cnt);
+ pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+ i, entry->rule.dst,
+ ipa2_get_ep_mapping(entry->rule.dst),
+ !ipa_ctx->hdr_tbl_lcl);
+ pr_err("proc_ctx[32B]:%u attrib_mask:%08x ",
+ ofst_words,
+ entry->rule.attrib.attrib_mask);
+ } else {
+ if (entry->hdr)
+ ofst = entry->hdr->offset_entry->offset;
+ else
+ ofst = 0;
+
+ pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+ entry->tbl->idx, entry->tbl->name,
+ entry->tbl->ref_cnt);
+ pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+ i, entry->rule.dst,
+ ipa2_get_ep_mapping(entry->rule.dst),
+ !ipa_ctx->hdr_tbl_lcl);
+ pr_err("hdr_ofst[words]:%u attrib_mask:%08x ",
+ ofst >> 2,
+ entry->rule.attrib.attrib_mask);
+ }
+
+ ipa_attrib_dump(&entry->rule.attrib, ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+
+static ssize_t ipa_read_proc_ctx(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes = 0;
+ struct ipa_hdr_proc_ctx_tbl *tbl;
+ struct ipa_hdr_proc_ctx_entry *entry;
+ u32 ofst_words;
+
+ tbl = &ipa_ctx->hdr_proc_ctx_tbl;
+
+ mutex_lock(&ipa_ctx->lock);
+
+ if (ipa_ctx->hdr_proc_ctx_tbl_lcl)
+ pr_info("Table resides on local memory\n");
+ else
+ pr_info("Table resides on system(ddr) memory\n");
+
+ list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) {
+ ofst_words = (entry->offset_entry->offset +
+ ipa_ctx->hdr_proc_ctx_tbl.start_offset)
+ >> 5;
+ if (entry->hdr->is_hdr_proc_ctx) {
+ nbytes += scnprintf(dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+ entry->id,
+ ipa_hdr_proc_type_name[entry->type],
+ ofst_words);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "hdr_phys_base:0x%pa\n",
+ &entry->hdr->phys_base);
+ } else {
+ nbytes += scnprintf(dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+ entry->id,
+ ipa_hdr_proc_type_name[entry->type],
+ ofst_words);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "hdr[words]:%u\n",
+ entry->hdr->offset_entry->offset >> 2);
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int i;
+ int j;
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ struct ipa_rt_tbl *rt_tbl;
+ u32 rt_tbl_idx;
+ u32 bitmap;
+ bool eq;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ mutex_lock(&ipa_ctx->lock);
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (entry->rule.eq_attrib_type) {
+ rt_tbl_idx = entry->rule.rt_tbl_idx;
+ bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
+ eq = true;
+ } else {
+ rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
+ if (rt_tbl)
+ rt_tbl_idx = rt_tbl->idx;
+ else
+ rt_tbl_idx = ~0;
+ bitmap = entry->rule.attrib.attrib_mask;
+ eq = false;
+ }
+ pr_err("ep_idx:global rule_idx:%d act:%d rt_tbl_idx:%d ",
+ i, entry->rule.action, rt_tbl_idx);
+ pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ",
+ bitmap, entry->rule.retain_hdr, eq);
+ if (eq)
+ ipa_attrib_dump_eq(
+ &entry->rule.eq_attrib);
+ else
+ ipa_attrib_dump(
+ &entry->rule.attrib, ip);
+ i++;
+ }
+
+ for (j = 0; j < ipa_ctx->ipa_num_pipes; j++) {
+ tbl = &ipa_ctx->flt_tbl[j][ip];
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (entry->rule.eq_attrib_type) {
+ rt_tbl_idx = entry->rule.rt_tbl_idx;
+ bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
+ eq = true;
+ } else {
+ rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
+ if (rt_tbl)
+ rt_tbl_idx = rt_tbl->idx;
+ else
+ rt_tbl_idx = ~0;
+ bitmap = entry->rule.attrib.attrib_mask;
+ eq = false;
+ }
+ pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+ j, i, entry->rule.action, rt_tbl_idx);
+ pr_err("attrib_mask:%08x retain_hdr:%d ",
+ bitmap, entry->rule.retain_hdr);
+ pr_err("eq:%d ", eq);
+ if (eq)
+ ipa_attrib_dump_eq(
+ &entry->rule.eq_attrib);
+ else
+ ipa_attrib_dump(
+ &entry->rule.attrib, ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+
+static ssize_t ipa_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int i;
+ int cnt = 0;
+ uint connect = 0;
+
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++)
+ connect |= (ipa_ctx->ep[i].valid << i);
+
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "sw_tx=%u\n"
+ "hw_tx=%u\n"
+ "tx_non_linear=%u\n"
+ "tx_compl=%u\n"
+ "wan_rx=%u\n"
+ "stat_compl=%u\n"
+ "lan_aggr_close=%u\n"
+ "wan_aggr_close=%u\n"
+ "act_clnt=%u\n"
+ "con_clnt_bmap=0x%x\n"
+ "wan_rx_empty=%u\n"
+ "wan_repl_rx_empty=%u\n"
+ "lan_rx_empty=%u\n"
+ "lan_repl_rx_empty=%u\n"
+ "flow_enable=%u\n"
+ "flow_disable=%u\n",
+ ipa_ctx->stats.tx_sw_pkts,
+ ipa_ctx->stats.tx_hw_pkts,
+ ipa_ctx->stats.tx_non_linear,
+ ipa_ctx->stats.tx_pkts_compl,
+ ipa_ctx->stats.rx_pkts,
+ ipa_ctx->stats.stat_compl,
+ ipa_ctx->stats.aggr_close,
+ ipa_ctx->stats.wan_aggr_close,
+ ipa_ctx->ipa_active_clients.cnt,
+ connect,
+ ipa_ctx->stats.wan_rx_empty,
+ ipa_ctx->stats.wan_repl_rx_empty,
+ ipa_ctx->stats.lan_rx_empty,
+ ipa_ctx->stats.lan_repl_rx_empty,
+ ipa_ctx->stats.flow_enable,
+ ipa_ctx->stats.flow_disable);
+ cnt += nbytes;
+
+ for (i = 0; i < MAX_NUM_EXCP; i++) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "lan_rx_excp[%u:%20s]=%u\n", i,
+ ipa_status_excp_name[i],
+ ipa_ctx->stats.rx_excp_pkts[i]);
+ cnt += nbytes;
+ }
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "sw_tx=%u\n"
+ "hw_tx=%u\n"
+ "rx=%u\n"
+ "rx_repl_repost=%u\n"
+ "rx_q_len=%u\n"
+ "act_clnt=%u\n"
+ "con_clnt_bmap=0x%x\n",
+ ipa_ctx->stats.tx_sw_pkts,
+ ipa_ctx->stats.tx_hw_pkts,
+ ipa_ctx->stats.rx_pkts,
+ ipa_ctx->stats.rx_repl_repost,
+ ipa_ctx->stats.rx_q_len,
+ ipa_ctx->ipa_active_clients.cnt,
+ connect);
+ cnt += nbytes;
+
+ for (i = 0; i < MAX_NUM_EXCP; i++) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "rx_excp[%u:%35s]=%u\n", i, ipa_excp_name[i],
+ ipa_ctx->stats.rx_excp_pkts[i]);
+ cnt += nbytes;
+ }
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_wstats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+
+#define HEAD_FRMT_STR "%25s\n"
+#define FRMT_STR "%25s %10u\n"
+#define FRMT_STR1 "%25s %10u\n\n"
+
+ int cnt = 0;
+ int nbytes;
+ int ipa_ep_idx;
+ enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD;
+ struct ipa_ep_context *ep;
+
+ do {
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:");
+ cnt += nbytes;
+
+ ipa_ep_idx = ipa2_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+ cnt += nbytes;
+ break;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+ if (ep->valid != 1) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+ cnt += nbytes;
+ break;
+ }
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Avail Fifo Desc:",
+ atomic_read(&ep->avail_fifo_desc));
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx Pkts Status Rcvd:",
+ ep->wstats.rx_pkts_status_rcvd);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx DH Processed:",
+ ep->wstats.rx_hd_processed);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail);
+ cnt += nbytes;
+
+ } while (0);
+
+ client = IPA_CLIENT_WLAN1_CONS;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+ "Client IPA_CLIENT_WLAN1_CONS Stats:");
+ cnt += nbytes;
+ while (1) {
+ ipa_ep_idx = ipa2_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+ cnt += nbytes;
+ goto nxt_clnt_cons;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+ if (ep->valid != 1) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+ cnt += nbytes;
+ goto nxt_clnt_cons;
+ }
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR1, "Tx Pkts Dropped:",
+ ep->wstats.tx_pkts_dropped);
+ cnt += nbytes;
+
+nxt_clnt_cons:
+ switch (client) {
+ case IPA_CLIENT_WLAN1_CONS:
+ client = IPA_CLIENT_WLAN2_CONS;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+ "Client IPA_CLIENT_WLAN2_CONS Stats:");
+ cnt += nbytes;
+ continue;
+ case IPA_CLIENT_WLAN2_CONS:
+ client = IPA_CLIENT_WLAN3_CONS;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+ "Client IPA_CLIENT_WLAN3_CONS Stats:");
+ cnt += nbytes;
+ continue;
+ case IPA_CLIENT_WLAN3_CONS:
+ client = IPA_CLIENT_WLAN4_CONS;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+ "Client IPA_CLIENT_WLAN4_CONS Stats:");
+ cnt += nbytes;
+ continue;
+ case IPA_CLIENT_WLAN4_CONS:
+ default:
+ break;
+ }
+ break;
+ }
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:");
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+ "Tx Comm Buff Allocated:",
+ ipa_ctx->wc_memb.wlan_comm_total_cnt);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+ "Tx Comm Buff Avail:", ipa_ctx->wc_memb.wlan_comm_free_cnt);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1,
+ "Total Tx Pkts Freed:", ipa_ctx->wc_memb.total_tx_pkts_freed);
+ cnt += nbytes;
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_ntn(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ struct IpaHwStatsNTNInfoData_t stats;
+ int nbytes;
+ int cnt = 0;
+
+ if (!ipa2_get_ntn_stats(&stats)) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "TX num_pkts_processed=%u\n"
+ "TX tail_ptr_val=%u\n"
+ "TX num_db_fired=%u\n"
+ "TX ringFull=%u\n"
+ "TX ringEmpty=%u\n"
+ "TX ringUsageHigh=%u\n"
+ "TX ringUsageLow=%u\n"
+ "TX RingUtilCount=%u\n"
+ "TX bamFifoFull=%u\n"
+ "TX bamFifoEmpty=%u\n"
+ "TX bamFifoUsageHigh=%u\n"
+ "TX bamFifoUsageLow=%u\n"
+ "TX bamUtilCount=%u\n"
+ "TX num_db=%u\n"
+ "TX num_unexpected_db=%u\n"
+ "TX num_bam_int_handled=%u\n"
+ "TX num_bam_int_in_non_running_state=%u\n"
+ "TX num_qmb_int_handled=%u\n"
+ "TX num_bam_int_handled_while_wait_for_bam=%u\n"
+ "TX num_bam_int_handled_while_not_in_bam=%u\n",
+ TX_STATS(num_pkts_processed),
+ TX_STATS(tail_ptr_val),
+ TX_STATS(num_db_fired),
+ TX_STATS(tx_comp_ring_stats.ringFull),
+ TX_STATS(tx_comp_ring_stats.ringEmpty),
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+ TX_STATS(tx_comp_ring_stats.ringUsageLow),
+ TX_STATS(tx_comp_ring_stats.RingUtilCount),
+ TX_STATS(bam_stats.bamFifoFull),
+ TX_STATS(bam_stats.bamFifoEmpty),
+ TX_STATS(bam_stats.bamFifoUsageHigh),
+ TX_STATS(bam_stats.bamFifoUsageLow),
+ TX_STATS(bam_stats.bamUtilCount),
+ TX_STATS(num_db),
+ TX_STATS(num_unexpected_db),
+ TX_STATS(num_bam_int_handled),
+ TX_STATS(num_bam_int_in_non_running_state),
+ TX_STATS(num_qmb_int_handled),
+ TX_STATS(num_bam_int_handled_while_wait_for_bam),
+ TX_STATS(num_bam_int_handled_while_not_in_bam));
+ cnt += nbytes;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "RX max_outstanding_pkts=%u\n"
+ "RX num_pkts_processed=%u\n"
+ "RX rx_ring_rp_value=%u\n"
+ "RX ringFull=%u\n"
+ "RX ringEmpty=%u\n"
+ "RX ringUsageHigh=%u\n"
+ "RX ringUsageLow=%u\n"
+ "RX RingUtilCount=%u\n"
+ "RX bamFifoFull=%u\n"
+ "RX bamFifoEmpty=%u\n"
+ "RX bamFifoUsageHigh=%u\n"
+ "RX bamFifoUsageLow=%u\n"
+ "RX bamUtilCount=%u\n"
+ "RX num_bam_int_handled=%u\n"
+ "RX num_db=%u\n"
+ "RX num_unexpected_db=%u\n"
+ "RX num_pkts_in_dis_uninit_state=%u\n"
+ "num_ic_inj_vdev_change=%u\n"
+ "num_ic_inj_fw_desc_change=%u\n",
+ RX_STATS(max_outstanding_pkts),
+ RX_STATS(num_pkts_processed),
+ RX_STATS(rx_ring_rp_value),
+ RX_STATS(rx_ind_ring_stats.ringFull),
+ RX_STATS(rx_ind_ring_stats.ringEmpty),
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+ RX_STATS(rx_ind_ring_stats.ringUsageLow),
+ RX_STATS(rx_ind_ring_stats.RingUtilCount),
+ RX_STATS(bam_stats.bamFifoFull),
+ RX_STATS(bam_stats.bamFifoEmpty),
+ RX_STATS(bam_stats.bamFifoUsageHigh),
+ RX_STATS(bam_stats.bamFifoUsageLow),
+ RX_STATS(bam_stats.bamUtilCount),
+ RX_STATS(num_bam_int_handled),
+ RX_STATS(num_db),
+ RX_STATS(num_unexpected_db),
+ RX_STATS(num_pkts_in_dis_uninit_state),
+ RX_STATS(num_bam_int_handled_while_not_in_bam),
+ RX_STATS(num_bam_int_handled_while_in_bam_state));
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Fail to read NTN stats\n");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_wdi(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct IpaHwStatsWDIInfoData_t stats;
+ int nbytes;
+ int cnt = 0;
+
+ if (!ipa2_get_wdi_stats(&stats)) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "TX num_pkts_processed=%u\n"
+ "TX copy_engine_doorbell_value=%u\n"
+ "TX num_db_fired=%u\n"
+ "TX ringFull=%u\n"
+ "TX ringEmpty=%u\n"
+ "TX ringUsageHigh=%u\n"
+ "TX ringUsageLow=%u\n"
+ "TX RingUtilCount=%u\n"
+ "TX bamFifoFull=%u\n"
+ "TX bamFifoEmpty=%u\n"
+ "TX bamFifoUsageHigh=%u\n"
+ "TX bamFifoUsageLow=%u\n"
+ "TX bamUtilCount=%u\n"
+ "TX num_db=%u\n"
+ "TX num_unexpected_db=%u\n"
+ "TX num_bam_int_handled=%u\n"
+ "TX num_bam_int_in_non_running_state=%u\n"
+ "TX num_qmb_int_handled=%u\n"
+ "TX num_bam_int_handled_while_wait_for_bam=%u\n",
+ stats.tx_ch_stats.num_pkts_processed,
+ stats.tx_ch_stats.copy_engine_doorbell_value,
+ stats.tx_ch_stats.num_db_fired,
+ stats.tx_ch_stats.tx_comp_ring_stats.ringFull,
+ stats.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
+ stats.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
+ stats.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
+ stats.tx_ch_stats.tx_comp_ring_stats.RingUtilCount,
+ stats.tx_ch_stats.bam_stats.bamFifoFull,
+ stats.tx_ch_stats.bam_stats.bamFifoEmpty,
+ stats.tx_ch_stats.bam_stats.bamFifoUsageHigh,
+ stats.tx_ch_stats.bam_stats.bamFifoUsageLow,
+ stats.tx_ch_stats.bam_stats.bamUtilCount,
+ stats.tx_ch_stats.num_db,
+ stats.tx_ch_stats.num_unexpected_db,
+ stats.tx_ch_stats.num_bam_int_handled,
+ stats.tx_ch_stats.num_bam_int_in_non_running_state,
+ stats.tx_ch_stats.num_qmb_int_handled,
+ stats.tx_ch_stats.
+ num_bam_int_handled_while_wait_for_bam);
+ cnt += nbytes;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "RX max_outstanding_pkts=%u\n"
+ "RX num_pkts_processed=%u\n"
+ "RX rx_ring_rp_value=%u\n"
+ "RX ringFull=%u\n"
+ "RX ringEmpty=%u\n"
+ "RX ringUsageHigh=%u\n"
+ "RX ringUsageLow=%u\n"
+ "RX RingUtilCount=%u\n"
+ "RX bamFifoFull=%u\n"
+ "RX bamFifoEmpty=%u\n"
+ "RX bamFifoUsageHigh=%u\n"
+ "RX bamFifoUsageLow=%u\n"
+ "RX bamUtilCount=%u\n"
+ "RX num_bam_int_handled=%u\n"
+ "RX num_db=%u\n"
+ "RX num_unexpected_db=%u\n"
+ "RX num_pkts_in_dis_uninit_state=%u\n"
+ "num_ic_inj_vdev_change=%u\n"
+ "num_ic_inj_fw_desc_change=%u\n"
+ "RX reserved1=%u\n"
+ "RX reserved2=%u\n",
+ stats.rx_ch_stats.max_outstanding_pkts,
+ stats.rx_ch_stats.num_pkts_processed,
+ stats.rx_ch_stats.rx_ring_rp_value,
+ stats.rx_ch_stats.rx_ind_ring_stats.ringFull,
+ stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
+ stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
+ stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
+ stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount,
+ stats.rx_ch_stats.bam_stats.bamFifoFull,
+ stats.rx_ch_stats.bam_stats.bamFifoEmpty,
+ stats.rx_ch_stats.bam_stats.bamFifoUsageHigh,
+ stats.rx_ch_stats.bam_stats.bamFifoUsageLow,
+ stats.rx_ch_stats.bam_stats.bamUtilCount,
+ stats.rx_ch_stats.num_bam_int_handled,
+ stats.rx_ch_stats.num_db,
+ stats.rx_ch_stats.num_unexpected_db,
+ stats.rx_ch_stats.num_pkts_in_dis_uninit_state,
+ stats.rx_ch_stats.num_ic_inj_vdev_change,
+ stats.rx_ch_stats.num_ic_inj_fw_desc_change,
+ stats.rx_ch_stats.reserved1,
+ stats.rx_ch_stats.reserved2);
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Fail to read WDI stats\n");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+void _ipa_write_dbg_cnt_v1_1(int option)
+{
+ if (option == 1)
+ ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0),
+ IPA_DBG_CNTR_ON);
+ else
+ ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0),
+ IPA_DBG_CNTR_OFF);
+}
+
+void _ipa_write_dbg_cnt_v2_0(int option)
+{
+ if (option == 1)
+ ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(0),
+ IPA_DBG_CNTR_ON);
+ else
+ ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(0),
+ IPA_DBG_CNTR_OFF);
+}
+
+static ssize_t ipa_write_dbg_cnt(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ u32 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtou32(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipa_ctx->ctrl->ipa_write_dbg_cnt(option);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return count;
+}
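+
+/*
+ * dbg_cnt programs the HW debug counter control register: writing 1
+ * latches IPA_DBG_CNTR_ON, any other value latches IPA_DBG_CNTR_OFF.
+ * Example (default debugfs mount point assumed):
+ *
+ *	echo 1 > /sys/kernel/debug/ipa/dbg_cnt
+ *	cat /sys/kernel/debug/ipa/dbg_cnt
+ */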
+
+int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len)
+{
+ int regval;
+
+ regval = ipa_read_reg(ipa_ctx->mmio,
+ IPA_DEBUG_CNT_REG_N_OFST_v1_1(0));
+
+ return scnprintf(buf, max_len,
+ "IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+}
+
+int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len)
+{
+ int regval;
+
+ regval = ipa_read_reg(ipa_ctx->mmio,
+ IPA_DEBUG_CNT_REG_N_OFST_v2_0(0));
+
+ return scnprintf(buf, max_len,
+ "IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+}
+
+static ssize_t ipa_read_dbg_cnt(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ nbytes = ipa_ctx->ctrl->ipa_read_dbg_cnt(dbg_buff, IPA_MAX_MSG_LEN);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_read_msg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < IPA_EVENT_MAX_NUM; i++) {
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "msg[%u:%27s] W:%u R:%u\n", i,
+ ipa_event_name[i],
+ ipa_ctx->stats.msg_w[i],
+ ipa_ctx->stats.msg_r[i]);
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_nat4(struct file *file,
+ char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+
+#define ENTRY_U32_FIELDS 8
+#define NAT_ENTRY_ENABLE 0x8000
+#define NAT_ENTRY_RST_FIN_BIT 0x4000
+#define BASE_TABLE 0
+#define EXPANSION_TABLE 1
+
+ u32 *base_tbl, *indx_tbl;
+ u32 tbl_size, *tmp;
+ u32 value, i, j, rule_id;
+ u16 enable, tbl_entry, flag;
+ u32 no_entrys = 0;
+
+ value = ipa_ctx->nat_mem.public_ip_addr;
+ pr_err(
+ "Table IP Address:%d.%d.%d.%d\n",
+ ((value & 0xFF000000) >> 24),
+ ((value & 0x00FF0000) >> 16),
+ ((value & 0x0000FF00) >> 8),
+ ((value & 0x000000FF)));
+
+ pr_err("Table Size:%d\n",
+ ipa_ctx->nat_mem.size_base_tables);
+
+ pr_err("Expansion Table Size:%d\n",
+ ipa_ctx->nat_mem.size_expansion_tables - 1);
+
+ if (!ipa_ctx->nat_mem.is_sys_mem)
+ pr_err("Not supported for local(shared) memory\n");
+
+ /* Print Base tables */
+ rule_id = 0;
+ for (j = 0; j < 2; j++) {
+ if (j == BASE_TABLE) {
+ tbl_size = ipa_ctx->nat_mem.size_base_tables;
+ base_tbl = (u32 *)ipa_ctx->nat_mem.ipv4_rules_addr;
+
+ pr_err("\nBase Table:\n");
+ } else {
+ tbl_size = ipa_ctx->nat_mem.size_expansion_tables - 1;
+ base_tbl =
+ (u32 *)ipa_ctx->nat_mem.ipv4_expansion_rules_addr;
+
+ pr_err("\nExpansion Base Table:\n");
+ }
+
+ if (base_tbl != NULL) {
+ for (i = 0; i <= tbl_size; i++, rule_id++) {
+ tmp = base_tbl;
+ value = tmp[4];
+ enable = ((value & 0xFFFF0000) >> 16);
+
+ if (enable & NAT_ENTRY_ENABLE) {
+ no_entrys++;
+ pr_err("Rule:%d ", rule_id);
+
+ value = *tmp;
+ pr_err(
+ "Private_IP:%d.%d.%d.%d ",
+ ((value & 0xFF000000) >> 24),
+ ((value & 0x00FF0000) >> 16),
+ ((value & 0x0000FF00) >> 8),
+ ((value & 0x000000FF)));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Target_IP:%d.%d.%d.%d ",
+ ((value & 0xFF000000) >> 24),
+ ((value & 0x00FF0000) >> 16),
+ ((value & 0x0000FF00) >> 8),
+ ((value & 0x000000FF)));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Next_Index:%d Public_Port:%d ",
+ (value & 0x0000FFFF),
+ ((value & 0xFFFF0000) >> 16));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Private_Port:%d Target_Port:%d ",
+ (value & 0x0000FFFF),
+ ((value & 0xFFFF0000) >> 16));
+ tmp++;
+
+ value = *tmp;
+ flag = ((value & 0xFFFF0000) >> 16);
+ if (flag & NAT_ENTRY_RST_FIN_BIT) {
+ pr_err(
+ "IP_CKSM_delta:0x%x Flags:%s ",
+ (value & 0x0000FFFF),
+ "Direct_To_A5");
+ } else {
+ pr_err(
+ "IP_CKSM_delta:0x%x Flags:%s ",
+ (value & 0x0000FFFF),
+ "Fwd_to_route");
+ }
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Time_stamp:0x%x Proto:%d ",
+ (value & 0x00FFFFFF),
+ ((value & 0xFF000000) >> 24));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Prev_Index:%d Indx_tbl_entry:%d ",
+ (value & 0x0000FFFF),
+ ((value & 0xFFFF0000) >> 16));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "TCP_UDP_cksum_delta:0x%x\n",
+ ((value & 0xFFFF0000) >> 16));
+ }
+
+ base_tbl += ENTRY_U32_FIELDS;
+
+ }
+ }
+ }
+
+ /* Print Index tables */
+ rule_id = 0;
+ for (j = 0; j < 2; j++) {
+ if (j == BASE_TABLE) {
+ tbl_size = ipa_ctx->nat_mem.size_base_tables;
+ indx_tbl = (u32 *)ipa_ctx->nat_mem.index_table_addr;
+
+ pr_err("\nIndex Table:\n");
+ } else {
+ tbl_size = ipa_ctx->nat_mem.size_expansion_tables - 1;
+ indx_tbl =
+ (u32 *)ipa_ctx->nat_mem.index_table_expansion_addr;
+
+ pr_err("\nExpansion Index Table:\n");
+ }
+
+ if (indx_tbl != NULL) {
+ for (i = 0; i <= tbl_size; i++, rule_id++) {
+ tmp = indx_tbl;
+ value = *tmp;
+ tbl_entry = (value & 0x0000FFFF);
+
+ if (tbl_entry) {
+ pr_err("Rule:%d ", rule_id);
+
+ value = *tmp;
+ pr_err(
+ "Table_Entry:%d Next_Index:%d\n",
+ tbl_entry,
+ ((value & 0xFFFF0000) >> 16));
+ }
+
+ indx_tbl++;
+ }
+ }
+ }
+ pr_err("Current No. Nat Entries: %d\n", no_entrys);
+
+ return 0;
+}
+
+static ssize_t ipa_rm_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int result, nbytes, cnt = 0;
+
+ result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
+ if (result < 0) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Error in printing RM stat %d\n", result);
+ cnt += nbytes;
+ } else
+ cnt += result;
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static void ipa_dump_status(struct ipa_hw_pkt_status *status)
+{
+ IPA_DUMP_STATUS_FIELD(status_opcode);
+ IPA_DUMP_STATUS_FIELD(exception);
+ IPA_DUMP_STATUS_FIELD(status_mask);
+ IPA_DUMP_STATUS_FIELD(pkt_len);
+ IPA_DUMP_STATUS_FIELD(endp_src_idx);
+ IPA_DUMP_STATUS_FIELD(endp_dest_idx);
+ IPA_DUMP_STATUS_FIELD(metadata);
+
+ if (ipa_ctx->ipa_hw_type < IPA_HW_v2_5) {
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_local);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_global);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_pipe_idx);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_match);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_rule_idx);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.ret_hdr);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.tag_f_1);
+ } else {
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_local);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_global);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_pipe_idx);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.ret_hdr);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_rule_idx);
+ IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.tag_f_1);
+ }
+
+ IPA_DUMP_STATUS_FIELD(tag_f_2);
+ IPA_DUMP_STATUS_FIELD(time_day_ctr);
+ IPA_DUMP_STATUS_FIELD(nat_hit);
+ IPA_DUMP_STATUS_FIELD(nat_tbl_idx);
+ IPA_DUMP_STATUS_FIELD(nat_type);
+ IPA_DUMP_STATUS_FIELD(route_local);
+ IPA_DUMP_STATUS_FIELD(route_tbl_idx);
+ IPA_DUMP_STATUS_FIELD(route_match);
+ IPA_DUMP_STATUS_FIELD(ucp);
+ IPA_DUMP_STATUS_FIELD(route_rule_idx);
+ IPA_DUMP_STATUS_FIELD(hdr_local);
+ IPA_DUMP_STATUS_FIELD(hdr_offset);
+ IPA_DUMP_STATUS_FIELD(frag_hit);
+ IPA_DUMP_STATUS_FIELD(frag_rule);
+}
+
+static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ipa_status_stats *stats;
+ int i, j;
+
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+ if (!ipa_ctx->ep[i].sys || !ipa_ctx->ep[i].sys->status_stat)
+ continue;
+
+ memcpy(stats, ipa_ctx->ep[i].sys->status_stat, sizeof(*stats));
+ stats->curr = (stats->curr + IPA_MAX_STATUS_STAT_NUM - 1)
+ % IPA_MAX_STATUS_STAT_NUM;
+ pr_err("Statuses for pipe %d\n", i);
+ for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) {
+ pr_err("curr=%d\n", stats->curr);
+ ipa_dump_status(&stats->status[stats->curr]);
+ pr_err("\n\n\n");
+ stats->curr = (stats->curr + 1) %
+ IPA_MAX_STATUS_STAT_NUM;
+ }
+ }
+
+ kfree(stats);
+ return 0;
+}
+
+static ssize_t ipa2_print_active_clients_log(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int cnt;
+ int table_size;
+
+ if (active_clients_buf == NULL) {
+ IPAERR("Active Clients buffer is not allocated");
+ return 0;
+ }
+ memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
+ ipa_active_clients_lock();
+ cnt = ipa2_active_clients_log_print_buffer(active_clients_buf,
+ IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE - IPA_MAX_MSG_LEN);
+ table_size = ipa2_active_clients_log_print_table(active_clients_buf
+ + cnt, IPA_MAX_MSG_LEN);
+ ipa_active_clients_unlock();
+
+ return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
+ cnt + table_size);
+}
+
+static ssize_t ipa2_clear_active_clients_log(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ ipa2_active_clients_log_clear();
+
+ return count;
+}
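+
+/*
+ * Reading active_clients prints the log followed by the per-client
+ * reference table; writing any integer clears the log. Example
+ * (default debugfs mount point assumed):
+ *
+ *	cat /sys/kernel/debug/ipa/active_clients
+ *	echo 0 > /sys/kernel/debug/ipa/active_clients
+ */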
+
+static ssize_t ipa_read_rx_polling_timeout(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int min_cnt;
+ int max_cnt;
+
+ if (active_clients_buf == NULL) {
+ IPAERR("Active Clients buffer is not allocated");
+ return 0;
+ }
+ memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
+ min_cnt = scnprintf(active_clients_buf,
+ IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+ "Rx Min Poll count = %u\n",
+ ipa_ctx->ipa_rx_min_timeout_usec);
+
+ max_cnt = scnprintf(active_clients_buf + min_cnt,
+ IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE - min_cnt,
+ "Rx Max Poll count = %u\n",
+ ipa_ctx->ipa_rx_max_timeout_usec);
+
+ return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
+ min_cnt + max_cnt);
+}
+
+static ssize_t ipa_write_rx_polling_timeout(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ s8 polltime = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ if (copy_from_user(dbg_buff, ubuf, count))
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+
+ if (kstrtos8(dbg_buff, 0, &polltime))
+ return -EFAULT;
+
+ ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec,
+ &ipa_ctx->ipa_rx_max_timeout_usec, polltime);
+ return count;
+}
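+
+/*
+ * ipa_rx_poll_time takes a single signed byte from which the min/max
+ * timeouts are derived by ipa_rx_timeout_min_max_calc(). Example
+ * (default debugfs mount point assumed):
+ *
+ *	echo 5 > /sys/kernel/debug/ipa/ipa_rx_poll_time
+ *	cat /sys/kernel/debug/ipa/ipa_rx_poll_time
+ */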
+
+static ssize_t ipa_read_polling_iteration(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int cnt;
+
+ if (active_clients_buf == NULL) {
+ IPAERR("Active Clients buffer is not allocated");
+ return 0;
+ }
+
+ memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
+
+ cnt = scnprintf(active_clients_buf, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+ "Polling Iteration count = %u\n",
+ ipa_ctx->ipa_polling_iteration);
+
+ return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
+ cnt);
+}
+
+static ssize_t ipa_write_polling_iteration(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ s8 iteration_cnt = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ if (copy_from_user(dbg_buff, ubuf, count))
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+
+ if (kstrtos8(dbg_buff, 0, &iteration_cnt))
+ return -EFAULT;
+
+ if ((iteration_cnt >= MIN_POLLING_ITERATION) &&
+ (iteration_cnt <= MAX_POLLING_ITERATION))
+ ipa_ctx->ipa_polling_iteration = iteration_cnt;
+ else
+ ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION;
+
+ return count;
+}
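+
+/*
+ * ipa_poll_iteration accepts values in [MIN_POLLING_ITERATION,
+ * MAX_POLLING_ITERATION] (1..40); out-of-range writes fall back to the
+ * maximum. Example (default debugfs mount point assumed):
+ *
+ *	echo 20 > /sys/kernel/debug/ipa/ipa_poll_iteration
+ */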
+
+const struct file_operations ipa_gen_reg_ops = {
+ .read = ipa_read_gen_reg,
+};
+
+const struct file_operations ipa_ep_reg_ops = {
+ .read = ipa_read_ep_reg,
+ .write = ipa_write_ep_reg,
+};
+
+const struct file_operations ipa_keep_awake_ops = {
+ .read = ipa_read_keep_awake,
+ .write = ipa_write_keep_awake,
+};
+
+const struct file_operations ipa_ep_holb_ops = {
+ .write = ipa_write_ep_holb,
+};
+
+const struct file_operations ipa_hdr_ops = {
+ .read = ipa_read_hdr,
+};
+
+const struct file_operations ipa_rt_ops = {
+ .read = ipa_read_rt,
+ .open = ipa_open_dbg,
+};
+
+const struct file_operations ipa_proc_ctx_ops = {
+ .read = ipa_read_proc_ctx,
+};
+
+const struct file_operations ipa_flt_ops = {
+ .read = ipa_read_flt,
+ .open = ipa_open_dbg,
+};
+
+const struct file_operations ipa_stats_ops = {
+ .read = ipa_read_stats,
+};
+
+const struct file_operations ipa_wstats_ops = {
+ .read = ipa_read_wstats,
+};
+
+const struct file_operations ipa_wdi_ops = {
+ .read = ipa_read_wdi,
+};
+
+const struct file_operations ipa_ntn_ops = {
+ .read = ipa_read_ntn,
+};
+
+const struct file_operations ipa_msg_ops = {
+ .read = ipa_read_msg,
+};
+
+const struct file_operations ipa_dbg_cnt_ops = {
+ .read = ipa_read_dbg_cnt,
+ .write = ipa_write_dbg_cnt,
+};
+
+const struct file_operations ipa_nat4_ops = {
+ .read = ipa_read_nat4,
+};
+
+const struct file_operations ipa_rm_stats = {
+ .read = ipa_rm_read_stats,
+};
+
+const struct file_operations ipa_status_stats_ops = {
+ .read = ipa_status_stats_read,
+};
+
+const struct file_operations ipa2_active_clients = {
+ .read = ipa2_print_active_clients_log,
+ .write = ipa2_clear_active_clients_log,
+};
+
+const struct file_operations ipa_rx_poll_time_ops = {
+ .read = ipa_read_rx_polling_timeout,
+ .write = ipa_write_rx_polling_timeout,
+};
+
+const struct file_operations ipa_poll_iteration_ops = {
+ .read = ipa_read_polling_iteration,
+ .write = ipa_write_polling_iteration,
+};
+
+void ipa_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP;
+ const mode_t write_only_mode = S_IWUSR | S_IWGRP;
+ struct dentry *file;
+
+ dent = debugfs_create_dir("ipa", 0);
+ if (IS_ERR(dent)) {
+ IPAERR("fail to create folder in debug_fs.\n");
+ return;
+ }
+
+ file = debugfs_create_u32("hw_type", read_only_mode,
+ dent, &ipa_ctx->ipa_hw_type);
+ if (!file) {
+ IPAERR("could not create hw_type file\n");
+ goto fail;
+ }
+
+
+ dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+ &ipa_gen_reg_ops);
+ if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+ IPAERR("fail to create file for debug_fs gen_reg\n");
+ goto fail;
+ }
+
+ dfile_active_clients = debugfs_create_file("active_clients",
+ read_write_mode, dent, 0, &ipa2_active_clients);
+ if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
+ IPAERR("fail to create file for debug_fs active_clients\n");
+ goto fail;
+ }
+
+ active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+ GFP_KERNEL);
+ if (active_clients_buf == NULL)
+ IPAERR("fail to allocate active clients memory buffer");
+
+ dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+ &ipa_ep_reg_ops);
+ if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+ IPAERR("fail to create file for debug_fs ep_reg\n");
+ goto fail;
+ }
+
+ dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode,
+ dent, 0, &ipa_keep_awake_ops);
+ if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) {
+ IPAERR("fail to create file for debug_fs dfile_keep_awake\n");
+ goto fail;
+ }
+
+ dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent,
+ 0, &ipa_ep_holb_ops);
+ if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) {
+ IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n");
+ goto fail;
+ }
+
+ dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+ &ipa_hdr_ops);
+ if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+ IPAERR("fail to create file for debug_fs hdr\n");
+ goto fail;
+ }
+
+ dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent,
+ 0, &ipa_proc_ctx_ops);
+ if (!dfile_proc_ctx || IS_ERR(dfile_proc_ctx)) {
+ IPAERR("fail to create file for debug_fs proc_ctx\n");
+ goto fail;
+ }
+
+ dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa_rt_ops);
+ if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+ IPAERR("fail to create file for debug_fs ip4 rt\n");
+ goto fail;
+ }
+
+ dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa_rt_ops);
+ if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+ IPAERR("fail to create file for debug_fs ip6:w rt\n");
+ goto fail;
+ }
+
+ dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa_flt_ops);
+ if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+ IPAERR("fail to create file for debug_fs ip4 flt\n");
+ goto fail;
+ }
+
+ dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa_flt_ops);
+ if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+ IPAERR("fail to create file for debug_fs ip6 flt\n");
+ goto fail;
+ }
+
+ dfile_stats = debugfs_create_file("stats", read_only_mode, dent, 0,
+ &ipa_stats_ops);
+ if (!dfile_stats || IS_ERR(dfile_stats)) {
+ IPAERR("fail to create file for debug_fs stats\n");
+ goto fail;
+ }
+
+ dfile_wstats = debugfs_create_file("wstats", read_only_mode,
+ dent, 0, &ipa_wstats_ops);
+ if (!dfile_wstats || IS_ERR(dfile_wstats)) {
+ IPAERR("fail to create file for debug_fs wstats\n");
+ goto fail;
+ }
+
+ dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, 0,
+ &ipa_wdi_ops);
+ if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) {
+ IPAERR("fail to create file for debug_fs wdi stats\n");
+ goto fail;
+ }
+
+ dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+ &ipa_ntn_ops);
+ if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+ IPAERR("fail to create file for debug_fs ntn stats\n");
+ goto fail;
+ }
+
+ dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
+ &ipa_dbg_cnt_ops);
+ if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
+ IPAERR("fail to create file for debug_fs dbg_cnt\n");
+ goto fail;
+ }
+
+ dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0,
+ &ipa_msg_ops);
+ if (!dfile_msg || IS_ERR(dfile_msg)) {
+ IPAERR("fail to create file for debug_fs msg\n");
+ goto fail;
+ }
+
+ dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent,
+ 0, &ipa_nat4_ops);
+ if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) {
+ IPAERR("fail to create file for debug_fs ip4 nat\n");
+ goto fail;
+ }
+
+ dfile_rm_stats = debugfs_create_file("rm_stats",
+ read_only_mode, dent, 0, &ipa_rm_stats);
+ if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) {
+ IPAERR("fail to create file for debug_fs rm_stats\n");
+ goto fail;
+ }
+
+ dfile_status_stats = debugfs_create_file("status_stats",
+ read_only_mode, dent, 0, &ipa_status_stats_ops);
+ if (!dfile_status_stats || IS_ERR(dfile_status_stats)) {
+ IPAERR("fail to create file for debug_fs status_stats\n");
+ goto fail;
+ }
+
+ dfile_ipa_rx_poll_timeout = debugfs_create_file("ipa_rx_poll_time",
+ read_write_mode, dent, 0, &ipa_rx_poll_time_ops);
+ if (!dfile_ipa_rx_poll_timeout || IS_ERR(dfile_ipa_rx_poll_timeout)) {
+ IPAERR("fail to create file for debug_fs rx poll timeout\n");
+ goto fail;
+ }
+
+ dfile_ipa_poll_iteration = debugfs_create_file("ipa_poll_iteration",
+ read_write_mode, dent, 0, &ipa_poll_iteration_ops);
+ if (!dfile_ipa_poll_iteration || IS_ERR(dfile_ipa_poll_iteration)) {
+ IPAERR("fail to create file for debug_fs poll iteration\n");
+ goto fail;
+ }
+
+ file = debugfs_create_u32("enable_clock_scaling", read_write_mode,
+ dent, &ipa_ctx->enable_clock_scaling);
+ if (!file) {
+ IPAERR("could not create enable_clock_scaling file\n");
+ goto fail;
+ }
+
+ file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps",
+ read_write_mode, dent,
+ &ipa_ctx->ctrl->clock_scaling_bw_threshold_nominal);
+ if (!file) {
+ IPAERR("could not create bw_threshold_nominal_mbps\n");
+ goto fail;
+ }
+
+ file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps",
+ read_write_mode, dent,
+ &ipa_ctx->ctrl->clock_scaling_bw_threshold_turbo);
+ if (!file) {
+ IPAERR("could not create bw_threshold_turbo_mbps\n");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+void ipa_debugfs_remove(void)
+{
+ if (IS_ERR(dent)) {
+ IPAERR("ipa_debugfs_remove: folder was not created.\n");
+ return;
+ }
+ if (active_clients_buf != NULL) {
+ kfree(active_clients_buf);
+ active_clients_buf = NULL;
+ }
+ debugfs_remove_recursive(dent);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa_debugfs_init(void) {}
+void ipa_debugfs_remove(void) {}
+int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len)
+{
+ return 0;
+}
+int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe)
+{
+ return 0;
+}
+int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
+{
+ return 0;
+}
+void _ipa_write_dbg_cnt_v1_1(int option) {}
+int _ipa_read_gen_reg_v2_0(char *buff, int max_len)
+{
+ return 0;
+}
+int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe)
+{
+ return 0;
+}
+void _ipa_write_dbg_cnt_v2_0(int option) {}
+int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len)
+{
+ return 0;
+}
+#endif
+
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c
new file mode 100644
index 0000000..bee6331
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c
@@ -0,0 +1,884 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010
+#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050
+#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8
+#define IPA_DMA_MAX_PKT_SZ 0xFFFF
+#define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \
+ sizeof(struct sps_iovec) - 1)
+#define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \
+ sizeof(struct sps_iovec) - 1)
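+
+/*
+ * Worked example for the limits above (illustrative; assumes an 8-byte
+ * struct sps_iovec and an IPA_SYS_DESC_FIFO_SZ of 0x800): the sync pipe can
+ * then hold 0x800 / 8 - 1 = 255 pending copies and the async pipe
+ * 0x7FF8 / 8 - 1 = 4094, the -1 leaving one descriptor slot free so a full
+ * FIFO is distinguishable from an empty one.
+ */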
+
+#define IPADMA_DRV_NAME "ipa_dma"
+
+#define IPADMA_DBG(fmt, args...) \
+ pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args)
+#define IPADMA_ERR(fmt, args...) \
+ pr_err(IPADMA_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define IPADMA_FUNC_ENTRY() \
+ IPADMA_DBG("ENTRY\n")
+
+#define IPADMA_FUNC_EXIT() \
+ IPADMA_DBG("EXIT\n")
+
+#ifdef CONFIG_DEBUG_FS
+#define IPADMA_MAX_MSG_LEN 1024
+static char dbg_buff[IPADMA_MAX_MSG_LEN];
+static void ipa_dma_debugfs_init(void);
+static void ipa_dma_debugfs_destroy(void);
+#else
+static void ipa_dma_debugfs_init(void) {}
+static void ipa_dma_debugfs_destroy(void) {}
+#endif
+
+/**
+ * struct ipa_dma_xfer_wrapper - IPADMA transfer descr wrapper
+ * @phys_addr_src: physical address of the source data to copy
+ * @phys_addr_dest: physical address to store the copied data
+ * @len: len in bytes to copy
+ * @link: linked to the wrappers list on the proper (sync/async) cons pipe
+ * @xfer_done: completion object for sync_memcpy completion
+ * @callback: IPADMA client provided completion callback
+ * @user1: cookie1 for above callback
+ *
+ * This struct can wrap both sync and async memcpy transfers descriptors.
+ */
+struct ipa_dma_xfer_wrapper {
+ u64 phys_addr_src;
+ u64 phys_addr_dest;
+ u16 len;
+ struct list_head link;
+ struct completion xfer_done;
+ void (*callback)(void *user1);
+ void *user1;
+};
+
+/**
+ * struct ipa_dma_ctx - IPADMA driver context information
+ * @is_enabled: is ipa_dma enabled?
+ * @destroy_pending: destroy ipa_dma after handling all pending memcpy
+ * @ipa_dma_xfer_wrapper_cache: cache of ipa_dma_xfer_wrapper structs
+ * @sync_lock: lock for synchronisation in sync_memcpy
+ * @async_lock: lock for synchronisation in async_memcpy
+ * @enable_lock: lock for is_enabled
+ * @pending_lock: lock to synchronize is_enabled and the pending counters
+ * @done: no pending work - ipadma can be destroyed
+ * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer
+ * @ipa_dma_async_prod_hdl: handle of async memcpy producer
+ * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer
+ * @ipa_dma_async_cons_hdl: handle of async memcpy consumer
+ * @sync_memcpy_pending_cnt: number of pending sync memcopy operations
+ * @async_memcpy_pending_cnt: number of pending async memcopy operations
+ * @uc_memcpy_pending_cnt: number of pending uc memcopy operations
+ * @total_sync_memcpy: total number of sync memcpy (statistics)
+ * @total_async_memcpy: total number of async memcpy (statistics)
+ * @total_uc_memcpy: total number of uc memcpy (statistics)
+ */
+struct ipa_dma_ctx {
+ bool is_enabled;
+ bool destroy_pending;
+ struct kmem_cache *ipa_dma_xfer_wrapper_cache;
+ struct mutex sync_lock;
+ spinlock_t async_lock;
+ struct mutex enable_lock;
+ spinlock_t pending_lock;
+ struct completion done;
+ u32 ipa_dma_sync_prod_hdl;
+ u32 ipa_dma_async_prod_hdl;
+ u32 ipa_dma_sync_cons_hdl;
+ u32 ipa_dma_async_cons_hdl;
+ atomic_t sync_memcpy_pending_cnt;
+ atomic_t async_memcpy_pending_cnt;
+ atomic_t uc_memcpy_pending_cnt;
+ atomic_t total_sync_memcpy;
+ atomic_t total_async_memcpy;
+ atomic_t total_uc_memcpy;
+};
+static struct ipa_dma_ctx *ipa_dma_ctx;
+
+/**
+ * ipa2_dma_init() - Initialize IPADMA.
+ *
+ * This function initializes all IPADMA internal data and connects the DMA
+ * pipes:
+ * MEMCPY_DMA_SYNC_PROD -> MEMCPY_DMA_SYNC_CONS
+ * MEMCPY_DMA_ASYNC_PROD -> MEMCPY_DMA_ASYNC_CONS
+ *
+ * Return codes: 0: success
+ * -EFAULT: IPADMA is already initialized
+ * -ENOMEM: memory allocation error
+ * -EPERM: pipe connection failed
+ */
+int ipa2_dma_init(void)
+{
+ struct ipa_dma_ctx *ipa_dma_ctx_t;
+ struct ipa_sys_connect_params sys_in;
+ int res = 0;
+
+ IPADMA_FUNC_ENTRY();
+
+ if (ipa_dma_ctx) {
+ IPADMA_ERR("Already initialized.\n");
+ return -EFAULT;
+ }
+ ipa_dma_ctx_t = kzalloc(sizeof(*(ipa_dma_ctx)), GFP_KERNEL);
+
+ if (!ipa_dma_ctx_t) {
+ IPADMA_ERR("kzalloc error.\n");
+ return -ENOMEM;
+ }
+
+ ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache =
+ kmem_cache_create("IPA_DMA_XFER_WRAPPER",
+ sizeof(struct ipa_dma_xfer_wrapper), 0, 0, NULL);
+ if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) {
+ IPAERR(":failed to create ipa dma xfer wrapper cache.\n");
+ res = -ENOMEM;
+ goto fail_mem_ctrl;
+ }
+
+ mutex_init(&ipa_dma_ctx_t->enable_lock);
+ spin_lock_init(&ipa_dma_ctx_t->async_lock);
+ mutex_init(&ipa_dma_ctx_t->sync_lock);
+ spin_lock_init(&ipa_dma_ctx_t->pending_lock);
+ init_completion(&ipa_dma_ctx_t->done);
+ ipa_dma_ctx_t->is_enabled = false;
+ ipa_dma_ctx_t->destroy_pending = false;
+ atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0);
+ atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0);
+ atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0);
+ atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0);
+ atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0);
+ atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0);
+
+ /* IPADMA SYNC PROD-source for sync memcpy */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+ sys_in.skip_ep_cfg = false;
+ if (ipa2_setup_sys_pipe(&sys_in,
+ &ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) {
+ IPADMA_ERR(":setup sync prod pipe failed\n");
+ res = -EPERM;
+ goto fail_sync_prod;
+ }
+
+ /* IPADMA SYNC CONS-destination for sync memcpy */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.skip_ep_cfg = false;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ sys_in.notify = NULL;
+ sys_in.priv = NULL;
+ if (ipa2_setup_sys_pipe(&sys_in,
+ &ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) {
+ IPADMA_ERR(":setup sync cons pipe failed.\n");
+ res = -EPERM;
+ goto fail_sync_cons;
+ }
+
+ IPADMA_DBG("SYNC MEMCPY pipes are connected\n");
+
+ /* IPADMA ASYNC PROD-source for async memcpy */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD;
+ sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+ sys_in.skip_ep_cfg = false;
+ sys_in.notify = NULL;
+ if (ipa2_setup_sys_pipe(&sys_in,
+ &ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) {
+ IPADMA_ERR(":setup async prod pipe failed.\n");
+ res = -EPERM;
+ goto fail_async_prod;
+ }
+
+ /* IPADMA ASYNC CONS-destination for async memcpy */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+ sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+ sys_in.skip_ep_cfg = false;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ sys_in.notify = ipa_dma_async_memcpy_notify_cb;
+ sys_in.priv = NULL;
+ if (ipa2_setup_sys_pipe(&sys_in,
+ &ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) {
+ IPADMA_ERR(":setup async cons pipe failed.\n");
+ res = -EPERM;
+ goto fail_async_cons;
+ }
+ ipa_dma_debugfs_init();
+ ipa_dma_ctx = ipa_dma_ctx_t;
+ IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");
+
+ IPADMA_FUNC_EXIT();
+ return res;
+fail_async_cons:
+ ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
+fail_async_prod:
+ ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
+fail_sync_cons:
+ ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
+fail_sync_prod:
+ kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache);
+fail_mem_ctrl:
+ kfree(ipa_dma_ctx_t);
+ ipa_dma_ctx = NULL;
+ return res;
+
+}
+
+
+/**
+ * ipa2_dma_enable() - Vote for IPA clocks.
+ *
+ * Return codes: 0: success
+ * -EPERM: IPADMA is not initialized or is already
+ * enabled
+ */
+int ipa2_dma_enable(void)
+{
+ IPADMA_FUNC_ENTRY();
+ if (ipa_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
+ return -EPERM;
+ }
+ mutex_lock(&ipa_dma_ctx->enable_lock);
+ if (ipa_dma_ctx->is_enabled) {
+ IPADMA_DBG("Already enabled.\n");
+ mutex_unlock(&ipa_dma_ctx->enable_lock);
+ return -EPERM;
+ }
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
+ ipa_dma_ctx->is_enabled = true;
+ mutex_unlock(&ipa_dma_ctx->enable_lock);
+
+ IPADMA_FUNC_EXIT();
+ return 0;
+}
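+
+/*
+ * Typical bring-up/teardown sequence for an IPADMA client (an illustrative
+ * sketch only; error handling is elided):
+ *
+ * if (ipa2_dma_init())
+ * return;
+ * if (ipa2_dma_enable())
+ * goto destroy;
+ * ... ipa2_dma_sync_memcpy() / ipa2_dma_async_memcpy() calls ...
+ * ipa2_dma_disable();
+ * destroy:
+ * ipa2_dma_destroy();
+ */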
+
+static bool ipa_dma_work_pending(void)
+{
+ if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)) {
+ IPADMA_DBG("pending sync\n");
+ return true;
+ }
+ if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) {
+ IPADMA_DBG("pending async\n");
+ return true;
+ }
+ if (atomic_read(&ipa_dma_ctx->uc_memcpy_pending_cnt)) {
+ IPADMA_DBG("pending uc\n");
+ return true;
+ }
+ IPADMA_DBG("no pending work\n");
+ return false;
+}
+
+/**
+ * ipa2_dma_disable() - Unvote for IPA clocks.
+ *
+ * Enter power save mode.
+ *
+ * Return codes: 0: success
+ * -EPERM: IPADMA is not initialized or is already
+ * disabled
+ * -EFAULT: can not disable ipa_dma as there are pending
+ * memcpy operations
+ */
+int ipa2_dma_disable(void)
+{
+ unsigned long flags;
+
+ IPADMA_FUNC_ENTRY();
+ if (ipa_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
+ return -EPERM;
+ }
+ mutex_lock(&ipa_dma_ctx->enable_lock);
+ spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
+ if (!ipa_dma_ctx->is_enabled) {
+ IPADMA_DBG("Already disabled.\n");
+ spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+ mutex_unlock(&ipa_dma_ctx->enable_lock);
+ return -EPERM;
+ }
+ if (ipa_dma_work_pending()) {
+ IPADMA_ERR("There is pending work, can't disable.\n");
+ spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+ mutex_unlock(&ipa_dma_ctx->enable_lock);
+ return -EFAULT;
+ }
+ ipa_dma_ctx->is_enabled = false;
+ spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
+ mutex_unlock(&ipa_dma_ctx->enable_lock);
+ IPADMA_FUNC_EXIT();
+ return 0;
+}
+
+/**
+ * ipa2_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid params
+ * -EPERM: operation not permitted as ipa_dma isn't enabled or
+ * initialized
+ * -SPS_ERROR: on sps failures
+ * -EFAULT: other
+ */
+int ipa2_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+ int ep_idx;
+ int res;
+ int i = 0;
+ struct ipa_sys_context *cons_sys;
+ struct ipa_sys_context *prod_sys;
+ struct sps_iovec iov;
+ struct ipa_dma_xfer_wrapper *xfer_descr = NULL;
+ struct ipa_dma_xfer_wrapper *head_descr = NULL;
+ unsigned long flags;
+
+ IPADMA_FUNC_ENTRY();
+
+ if (ipa_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+ return -EPERM;
+ }
+ if ((max(src, dest) - min(src, dest)) < len) {
+ IPADMA_ERR("invalid addresses - overlapping buffers\n");
+ return -EINVAL;
+ }
+ if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+ IPADMA_ERR("invalid len, %d\n", len);
+ return -EINVAL;
+ }
+ if (((u32)src != src) || ((u32)dest != dest)) {
+ IPADMA_ERR("Bad addr - only 32b addr supported for BAM");
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
+ if (!ipa_dma_ctx->is_enabled) {
+ IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+ spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+ return -EPERM;
+ }
+ atomic_inc(&ipa_dma_ctx->sync_memcpy_pending_cnt);
+ spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+ if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) >=
+ IPA_DMA_MAX_PENDING_SYNC) {
+ atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt);
+ IPADMA_DBG("Reached pending requests limit\n");
+ return -EFAULT;
+ }
+
+ ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+ if (-1 == ep_idx) {
+ IPADMA_ERR("Client %u is not mapped\n",
+ IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+ res = -EFAULT;
+ goto fail_mem_alloc;
+ }
+ cons_sys = ipa_ctx->ep[ep_idx].sys;
+
+ ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+ if (-1 == ep_idx) {
+ IPADMA_ERR("Client %u is not mapped\n",
+ IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+ res = -EFAULT;
+ goto fail_mem_alloc;
+ }
+ prod_sys = ipa_ctx->ep[ep_idx].sys;
+
+ xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache,
+ GFP_KERNEL);
+ if (!xfer_descr) {
+ IPADMA_ERR("failed to alloc xfer descr wrapper\n");
+ res = -ENOMEM;
+ goto fail_mem_alloc;
+ }
+ xfer_descr->phys_addr_dest = dest;
+ xfer_descr->phys_addr_src = src;
+ xfer_descr->len = len;
+ init_completion(&xfer_descr->xfer_done);
+
+ mutex_lock(&ipa_dma_ctx->sync_lock);
+ list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+ cons_sys->len++;
+ res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, NULL, 0);
+ if (res) {
+ IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+ goto fail_sps_send;
+ }
+ res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+ NULL, SPS_IOVEC_FLAG_EOT);
+ if (res) {
+ IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+ BUG();
+ }
+ head_descr = list_first_entry(&cons_sys->head_desc_list,
+ struct ipa_dma_xfer_wrapper, link);
+
+ /* in case we are not the head of the list, wait for head to wake us */
+ if (xfer_descr != head_descr) {
+ mutex_unlock(&ipa_dma_ctx->sync_lock);
+ wait_for_completion(&xfer_descr->xfer_done);
+ mutex_lock(&ipa_dma_ctx->sync_lock);
+ head_descr = list_first_entry(&cons_sys->head_desc_list,
+ struct ipa_dma_xfer_wrapper, link);
+ BUG_ON(xfer_descr != head_descr);
+ }
+ mutex_unlock(&ipa_dma_ctx->sync_lock);
+
+ do {
+ /* wait for transfer to complete */
+ res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov);
+ if (res)
+ IPADMA_ERR("Failed: get_iovec, returned %d loop#:%d\n"
+ , res, i);
+
+ usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX,
+ IPA_DMA_POLLING_MAX_SLEEP_RX);
+ i++;
+ } while (iov.addr == 0);
+
+ mutex_lock(&ipa_dma_ctx->sync_lock);
+ list_del(&head_descr->link);
+ cons_sys->len--;
+ kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+ /* wake the head of the list */
+ if (!list_empty(&cons_sys->head_desc_list)) {
+ head_descr = list_first_entry(&cons_sys->head_desc_list,
+ struct ipa_dma_xfer_wrapper, link);
+ complete(&head_descr->xfer_done);
+ }
+ mutex_unlock(&ipa_dma_ctx->sync_lock);
+
+ BUG_ON(dest != iov.addr);
+ BUG_ON(len != iov.size);
+ atomic_inc(&ipa_dma_ctx->total_sync_memcpy);
+ atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt);
+ if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+ complete(&ipa_dma_ctx->done);
+
+ IPADMA_FUNC_EXIT();
+ return res;
+
+fail_sps_send:
+ list_del(&xfer_descr->link);
+ cons_sys->len--;
+ mutex_unlock(&ipa_dma_ctx->sync_lock);
+ kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+ atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt);
+ if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+ complete(&ipa_dma_ctx->done);
+ return res;
+}
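+
+/*
+ * Example use of the sync copy (a sketch; dev and the buffer names are
+ * hypothetical). Both addresses must be DMA-able physical addresses that
+ * fit in 32 bits:
+ *
+ * dma_addr_t src_pa = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
+ * dma_addr_t dst_pa = dma_map_single(dev, dst_buf, len, DMA_FROM_DEVICE);
+ * int rc = ipa2_dma_sync_memcpy(dst_pa, src_pa, len);
+ */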
+
+/**
+ * ipa2_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid params
+ * -EPERM: operation not permitted as ipa_dma isn't enabled or
+ * initialized
+ * -SPS_ERROR: on sps failures
+ * -EFAULT: descr fifo is full.
+ */
+int ipa2_dma_async_memcpy(u64 dest, u64 src, int len,
+ void (*user_cb)(void *user1), void *user_param)
+{
+ int ep_idx;
+ int res = 0;
+ struct ipa_dma_xfer_wrapper *xfer_descr = NULL;
+ struct ipa_sys_context *prod_sys;
+ struct ipa_sys_context *cons_sys;
+ unsigned long flags;
+
+ IPADMA_FUNC_ENTRY();
+ if (ipa_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+ return -EPERM;
+ }
+ if ((max(src, dest) - min(src, dest)) < len) {
+ IPADMA_ERR("invalid addresses - overlapping buffers\n");
+ return -EINVAL;
+ }
+ if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+ IPADMA_ERR("invalid len, %d\n", len);
+ return -EINVAL;
+ }
+ if (((u32)src != src) || ((u32)dest != dest)) {
+ IPADMA_ERR("Bad addr - only 32b addr supported for BAM");
+ return -EINVAL;
+ }
+ if (!user_cb) {
+ IPADMA_ERR("null pointer: user_cb\n");
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
+ if (!ipa_dma_ctx->is_enabled) {
+ IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n");
+ spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+ return -EPERM;
+ }
+ atomic_inc(&ipa_dma_ctx->async_memcpy_pending_cnt);
+ spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+ if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt) >=
+ IPA_DMA_MAX_PENDING_ASYNC) {
+ atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt);
+ IPADMA_DBG("Reached pending requests limit\n");
+ return -EFAULT;
+ }
+
+ ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+ if (-1 == ep_idx) {
+ IPADMA_ERR("Client %u is not mapped\n",
+ IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+ res = -EFAULT;
+ goto fail_mem_alloc;
+ }
+ cons_sys = ipa_ctx->ep[ep_idx].sys;
+
+ ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+ if (-1 == ep_idx) {
+ IPADMA_ERR("Client %u is not mapped\n",
+ IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+ res = -EFAULT;
+ goto fail_mem_alloc;
+ }
+ prod_sys = ipa_ctx->ep[ep_idx].sys;
+
+ xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache,
+ GFP_KERNEL);
+ if (!xfer_descr) {
+ IPADMA_ERR("failed to alloc xfrer descr wrapper\n");
+ res = -ENOMEM;
+ goto fail_mem_alloc;
+ }
+ xfer_descr->phys_addr_dest = dest;
+ xfer_descr->phys_addr_src = src;
+ xfer_descr->len = len;
+ xfer_descr->callback = user_cb;
+ xfer_descr->user1 = user_param;
+
+ spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags);
+ list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+ cons_sys->len++;
+ res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, xfer_descr, 0);
+ if (res) {
+ IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+ goto fail_sps_send;
+ }
+ res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+ NULL, SPS_IOVEC_FLAG_EOT);
+ if (res) {
+ IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+ BUG();
+ goto fail_sps_send;
+ }
+ spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags);
+ IPADMA_FUNC_EXIT();
+ return res;
+
+fail_sps_send:
+ list_del(&xfer_descr->link);
+ spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags);
+ kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+ atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt);
+ if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+ complete(&ipa_dma_ctx->done);
+ return res;
+}
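+
+/*
+ * Example async usage (a sketch; copy_done and the completion below are
+ * hypothetical). The callback runs from the IPA Rx path, so it must not
+ * block:
+ *
+ * static void copy_done(void *user)
+ * {
+ * complete((struct completion *)user);
+ * }
+ * ...
+ * DECLARE_COMPLETION_ONSTACK(copy_comp);
+ * if (!ipa2_dma_async_memcpy(dst_pa, src_pa, len, copy_done, &copy_comp))
+ * wait_for_completion(&copy_comp);
+ */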
+
+/**
+ * ipa2_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid params
+ * -EPERM: operation not permitted as ipa_dma isn't enabled or
+ * initialized
+ * -EBADF: IPA uC is not loaded
+ */
+int ipa2_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+ int res;
+ unsigned long flags;
+
+ IPADMA_FUNC_ENTRY();
+ if (ipa_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+ return -EPERM;
+ }
+ if ((max(src, dest) - min(src, dest)) < len) {
+ IPADMA_ERR("invalid addresses - overlapping buffers\n");
+ return -EINVAL;
+ }
+ if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+ IPADMA_ERR("invalid len, %d\n", len);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
+ if (!ipa_dma_ctx->is_enabled) {
+ IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+ spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+ return -EPERM;
+ }
+ atomic_inc(&ipa_dma_ctx->uc_memcpy_pending_cnt);
+ spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+
+ res = ipa_uc_memcpy(dest, src, len);
+ if (res) {
+ IPADMA_ERR("ipa_uc_memcpy failed %d\n", res);
+ goto dec_and_exit;
+ }
+
+ atomic_inc(&ipa_dma_ctx->total_uc_memcpy);
+ res = 0;
+dec_and_exit:
+ atomic_dec(&ipa_dma_ctx->uc_memcpy_pending_cnt);
+ if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+ complete(&ipa_dma_ctx->done);
+ IPADMA_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * ipa2_dma_destroy() - Teardown IPADMA pipes and release ipadma.
+ *
+ * This is a blocking function; it returns only after IPADMA is destroyed.
+ */
+void ipa2_dma_destroy(void)
+{
+ int res = 0;
+
+ IPADMA_FUNC_ENTRY();
+ if (!ipa_dma_ctx) {
+ IPADMA_DBG("IPADMA isn't initialized\n");
+ return;
+ }
+
+ if (ipa_dma_work_pending()) {
+ ipa_dma_ctx->destroy_pending = true;
+ IPADMA_DBG("There are pending memcpy, wait for completion\n");
+ wait_for_completion(&ipa_dma_ctx->done);
+ }
+
+ res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_cons_hdl);
+ if (res)
+ IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n");
+ ipa_dma_ctx->ipa_dma_async_cons_hdl = 0;
+ res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_cons_hdl);
+ if (res)
+ IPADMA_ERR("teardown IPADMA SYNC CONS failed\n");
+ ipa_dma_ctx->ipa_dma_sync_cons_hdl = 0;
+ res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_prod_hdl);
+ if (res)
+ IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n");
+ ipa_dma_ctx->ipa_dma_async_prod_hdl = 0;
+ res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_prod_hdl);
+ if (res)
+ IPADMA_ERR("teardown IPADMA SYNC PROD failed\n");
+ ipa_dma_ctx->ipa_dma_sync_prod_hdl = 0;
+
+ ipa_dma_debugfs_destroy();
+ kmem_cache_destroy(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache);
+ kfree(ipa_dma_ctx);
+ ipa_dma_ctx = NULL;
+
+ IPADMA_FUNC_EXIT();
+}
+
+/**
+ * ipa_dma_async_memcpy_notify_cb() - Callback function which will be called
+ * by the IPA driver, after a notification from the SPS driver or from poll
+ * mode, when an Rx operation is complete (data was written to the dest
+ * descriptor on the async_cons ep).
+ *
+ * @priv: not in use.
+ * @evt: event name - IPA_RECEIVE.
+ * @data: the iovec.
+ */
+void ipa_dma_async_memcpy_notify_cb(void *priv,
+ enum ipa_dp_evt_type evt, unsigned long data)
+{
+ int ep_idx = 0;
+ struct sps_iovec *iov = (struct sps_iovec *) data;
+ struct ipa_dma_xfer_wrapper *xfer_descr_expected;
+ struct ipa_sys_context *sys;
+ unsigned long flags;
+
+ IPADMA_FUNC_ENTRY();
+
+ ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+ sys = ipa_ctx->ep[ep_idx].sys;
+
+ spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags);
+ xfer_descr_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa_dma_xfer_wrapper, link);
+ list_del(&xfer_descr_expected->link);
+ sys->len--;
+ spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags);
+
+ BUG_ON(xfer_descr_expected->phys_addr_dest != iov->addr);
+ BUG_ON(xfer_descr_expected->len != iov->size);
+
+ atomic_inc(&ipa_dma_ctx->total_async_memcpy);
+ atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt);
+ xfer_descr_expected->callback(xfer_descr_expected->user1);
+
+ kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache,
+ xfer_descr_expected);
+
+ if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+ complete(&ipa_dma_ctx->done);
+
+ IPADMA_FUNC_EXIT();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_info;
+
+static ssize_t ipa_dma_debugfs_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes = 0;
+
+ if (!ipa_dma_ctx) {
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "Not initialized\n");
+ } else {
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "Status:\n IPADMA is %s\n",
+ (ipa_dma_ctx->is_enabled) ? "Enabled" : "Disabled");
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "Statistics:\n total sync memcpy: %d\n ",
+ atomic_read(&ipa_dma_ctx->total_sync_memcpy));
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "total async memcpy: %d\n ",
+ atomic_read(&ipa_dma_ctx->total_async_memcpy));
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "pending sync memcpy jobs: %d\n ",
+ atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt));
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "pending async memcpy jobs: %d\n",
+ atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt));
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "pending uc memcpy jobs: %d\n",
+ atomic_read(&ipa_dma_ctx->uc_memcpy_pending_cnt));
+ }
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_dma_debugfs_reset_statistics(struct file *file,
+ const char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned long missing;
+ s8 in_num = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &in_num))
+ return -EFAULT;
+ switch (in_num) {
+ case 0:
+ if (ipa_dma_work_pending())
+ IPADMA_DBG("Note, there are pending memcpy\n");
+
+ atomic_set(&ipa_dma_ctx->total_async_memcpy, 0);
+ atomic_set(&ipa_dma_ctx->total_sync_memcpy, 0);
+ break;
+ default:
+ IPADMA_ERR("invalid argument: To reset statistics echo 0\n");
+ break;
+ }
+ return count;
+}
+
+const struct file_operations ipadma_stats_ops = {
+ .read = ipa_dma_debugfs_read,
+ .write = ipa_dma_debugfs_reset_statistics,
+};
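+
+/*
+ * The "info" node exposes the state and statistics above; assuming debugfs
+ * is mounted at /sys/kernel/debug, reading
+ * /sys/kernel/debug/ipa_dma/info dumps them, and writing "0" to the same
+ * node resets the total sync/async counters.
+ */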
+
+static void ipa_dma_debugfs_init(void)
+{
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+
+ dent = debugfs_create_dir("ipa_dma", 0);
+ if (IS_ERR(dent)) {
+ IPADMA_ERR("fail to create folder ipa_dma\n");
+ return;
+ }
+
+ dfile_info =
+ debugfs_create_file("info", read_write_mode, dent,
+ 0, &ipadma_stats_ops);
+ if (!dfile_info || IS_ERR(dfile_info)) {
+ IPADMA_ERR("fail to create file stats\n");
+ goto fail;
+ }
+ return;
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+static void ipa_dma_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(dent);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
new file mode 100644
index 0000000..02e4a76
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -0,0 +1,3711 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+#include "ipa_trace.h"
+
+#define IPA_LAST_DESC_CNT 0xFFFF
+#define POLLING_INACTIVITY_RX 40
+#define POLLING_INACTIVITY_TX 40
+#define POLLING_MIN_SLEEP_TX 400
+#define POLLING_MAX_SLEEP_TX 500
+/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_MTU 1500
+#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
+#define IPA_GENERIC_AGGR_TIME_LIMIT 1
+#define IPA_GENERIC_AGGR_PKT_LIMIT 0
+
+#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
+#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
+ (X) + NET_SKB_PAD) +\
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
+ (IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
+#define IPA_GENERIC_RX_BUFF_LIMIT (\
+ IPA_REAL_GENERIC_RX_BUFF_SZ(\
+ IPA_GENERIC_RX_BUFF_BASE_SZ) -\
+ IPA_GENERIC_RX_BUFF_BASE_SZ)
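+
+/*
+ * Worked example of the sizing above (illustrative; NET_SKB_PAD and
+ * sizeof(struct skb_shared_info) vary by arch/config - 64 and 320 bytes
+ * are assumed here): IPA_REAL_GENERIC_RX_BUFF_SZ(8192) = 8256 + 320 = 8576,
+ * so IPA_GENERIC_RX_BUFF_SZ(8192) = 8192 - 384 = 7808, i.e. the payload is
+ * shrunk by the skb overhead so the real allocation stays near the 8K base.
+ */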
+
+#define IPA_RX_BUFF_CLIENT_HEADROOM 256
+
+/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
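+
+/*
+ * For example, with the 8K base buffer above:
+ * IPA_ADJUST_AGGR_BYTE_LIMIT(8192) = (8192 - 1500) / 1000 = 6, matching the
+ * IPA_GENERIC_AGGR_BYTE_LIMIT value of 6 defined earlier.
+ */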
+
+#define IPA_WLAN_RX_POOL_SZ 100
+#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
+#define IPA_WLAN_RX_BUFF_SZ 2048
+#define IPA_WLAN_COMM_RX_POOL_LOW 100
+#define IPA_WLAN_COMM_RX_POOL_HIGH 900
+
+#define IPA_ODU_RX_BUFF_SZ 2048
+#define IPA_ODU_RX_POOL_SZ 32
+#define IPA_SIZE_DL_CSUM_META_TRAILER 8
+
+#define IPA_HEADROOM 128
+
+static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags);
+static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys);
+static void ipa_replenish_rx_cache(struct ipa_sys_context *sys);
+static void replenish_rx_work_func(struct work_struct *work);
+static void ipa_wq_handle_rx(struct work_struct *work);
+static void ipa_wq_handle_tx(struct work_struct *work);
+static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size);
+static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys,
+ u32 size);
+static int ipa_assign_policy(struct ipa_sys_connect_params *in,
+ struct ipa_sys_context *sys);
+static void ipa_cleanup_rx(struct ipa_sys_context *sys);
+static void ipa_wq_rx_avail(struct work_struct *work);
+static void ipa_alloc_wlan_rx_common_cache(u32 size);
+static void ipa_cleanup_wlan_rx_common_cache(void);
+static void ipa_wq_repl_rx(struct work_struct *work);
+static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
+ struct sps_iovec *iovec);
+
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
+static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys);
+
+static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt_expected;
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ spin_lock_bh(&sys->spinlock);
+ if (unlikely(list_empty(&sys->head_desc_list))) {
+ spin_unlock_bh(&sys->spinlock);
+ return;
+ }
+ tx_pkt_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa_tx_pkt_wrapper,
+ link);
+ list_del(&tx_pkt_expected->link);
+ sys->len--;
+ spin_unlock_bh(&sys->spinlock);
+ if (!tx_pkt_expected->no_unmap_dma) {
+ if (tx_pkt_expected->type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa_ctx->pdev,
+ tx_pkt_expected->mem.phys_base,
+ tx_pkt_expected->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa_ctx->pdev,
+ tx_pkt_expected->mem.phys_base,
+ tx_pkt_expected->mem.size,
+ DMA_TO_DEVICE);
+ }
+ }
+ if (tx_pkt_expected->callback)
+ tx_pkt_expected->callback(tx_pkt_expected->user1,
+ tx_pkt_expected->user2);
+ if (tx_pkt_expected->cnt > 1 &&
+ tx_pkt_expected->cnt != IPA_LAST_DESC_CNT) {
+ if (tx_pkt_expected->cnt == IPA_NUM_DESC_PER_SW_TX) {
+ dma_pool_free(ipa_ctx->dma_pool,
+ tx_pkt_expected->mult.base,
+ tx_pkt_expected->mult.phys_base);
+ } else {
+ dma_unmap_single(ipa_ctx->pdev,
+ tx_pkt_expected->mult.phys_base,
+ tx_pkt_expected->mult.size,
+ DMA_TO_DEVICE);
+ kfree(tx_pkt_expected->mult.base);
+ }
+ }
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt_expected);
+ }
+}
+
+static void ipa_wq_write_done_status(int src_pipe)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt_expected;
+ struct ipa_sys_context *sys;
+ u32 cnt;
+
+ WARN_ON(src_pipe >= ipa_ctx->ipa_num_pipes);
+
+ if (!ipa_ctx->ep[src_pipe].status.status_en)
+ return;
+
+ sys = ipa_ctx->ep[src_pipe].sys;
+ if (!sys)
+ return;
+
+ spin_lock_bh(&sys->spinlock);
+ if (unlikely(list_empty(&sys->head_desc_list))) {
+ spin_unlock_bh(&sys->spinlock);
+ return;
+ }
+ tx_pkt_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa_tx_pkt_wrapper,
+ link);
+ cnt = tx_pkt_expected->cnt;
+ spin_unlock_bh(&sys->spinlock);
+ ipa_wq_write_done_common(sys, cnt);
+}
+
+/**
+ * ipa_wq_write_done() - this function will be (eventually) called when a Tx
+ * operation is complete
+ * @work: work_struct used by the work queue
+ *
+ * Will be called in deferred context.
+ * - invoke the callback supplied by the client who sent this command
+ * - iterate over all packets and validate that
+ * the order for sent packet is the same as expected
+ * - delete all the tx packet descriptors from the system
+ * pipe context (not needed anymore)
+ * - return the tx buffer back to dma_pool
+ */
+static void ipa_wq_write_done(struct work_struct *work)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ u32 cnt;
+ struct ipa_sys_context *sys;
+
+ tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
+ cnt = tx_pkt->cnt;
+ sys = tx_pkt->sys;
+
+ ipa_wq_write_done_common(sys, cnt);
+}
+
+static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
+ bool in_poll_state)
+{
+ struct sps_iovec iov;
+ int ret;
+ int cnt = 0;
+
+ while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+ !atomic_read(&sys->curr_polling_state))) {
+ if (cnt && !process_all)
+ break;
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ break;
+ }
+
+ if (iov.addr == 0)
+ break;
+
+ ipa_wq_write_done_common(sys, 1);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+/**
+ * ipa_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
+ */
+static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys)
+{
+ int ret;
+
+ if (!atomic_read(&sys->curr_polling_state)) {
+ IPAERR("already in intr mode\n");
+ goto fail;
+ }
+
+ ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ goto fail;
+ }
+ sys->event.options = SPS_O_EOT;
+ ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+ if (ret) {
+ IPAERR("sps_register_event() failed %d\n", ret);
+ goto fail;
+ }
+ sys->ep->connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+ ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ goto fail;
+ }
+ atomic_set(&sys->curr_polling_state, 0);
+ ipa_handle_tx_core(sys, true, false);
+ return;
+
+fail:
+ queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+ msecs_to_jiffies(1));
+}
+
+static void ipa_handle_tx(struct ipa_sys_context *sys)
+{
+ int inactive_cycles = 0;
+ int cnt;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ do {
+ cnt = ipa_handle_tx_core(sys, true, true);
+ if (cnt == 0) {
+ inactive_cycles++;
+ usleep_range(POLLING_MIN_SLEEP_TX,
+ POLLING_MAX_SLEEP_TX);
+ } else {
+ inactive_cycles = 0;
+ }
+ } while (inactive_cycles <= POLLING_INACTIVITY_TX);
+
+ ipa_tx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static void ipa_wq_handle_tx(struct work_struct *work)
+{
+ struct ipa_sys_context *sys;
+
+ sys = container_of(work, struct ipa_sys_context, work);
+
+ ipa_handle_tx(sys);
+}
+
+/**
+ * ipa_send_one() - Send a single descriptor
+ * @sys: system pipe context
+ * @desc: descriptor to send
+ * @in_atomic: whether caller is in atomic context
+ *
+ * - Allocate tx_packet wrapper
+ * - transfer data to the IPA
+ * - after the transfer was done the SPS will
+ * notify the sending user via ipa_sps_irq_comp_tx()
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
+ bool in_atomic)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ int result;
+ u16 sps_flags = SPS_IOVEC_FLAG_EOT;
+ dma_addr_t dma_address;
+ u16 len;
+ u32 mem_flag = GFP_ATOMIC;
+ struct sps_iovec iov;
+ int ret;
+
+ if (unlikely(!in_atomic))
+ mem_flag = GFP_KERNEL;
+
+ tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto fail_mem_alloc;
+ }
+
+ if (!desc->dma_address_valid) {
+ dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld,
+ desc->len, DMA_TO_DEVICE);
+ } else {
+ dma_address = desc->dma_address;
+ tx_pkt->no_unmap_dma = true;
+ }
+ if (!dma_address) {
+ IPAERR("failed to DMA wrap\n");
+ goto fail_dma_map;
+ }
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+ tx_pkt->type = desc->type;
+ tx_pkt->cnt = 1; /* only 1 desc in this "set" */
+
+ tx_pkt->mem.phys_base = dma_address;
+ tx_pkt->mem.base = desc->pyld;
+ tx_pkt->mem.size = desc->len;
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc->callback;
+ tx_pkt->user1 = desc->user1;
+ tx_pkt->user2 = desc->user2;
+
+ /*
+ * Special treatment for immediate commands, where the structure of the
+ * descriptor is different
+ */
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ sps_flags |= SPS_IOVEC_FLAG_IMME;
+ len = desc->opcode;
+ IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+ desc->opcode, desc->len, sps_flags);
+ IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+ } else {
+ len = desc->len;
+ }
+
+ INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
+
+ spin_lock_bh(&sys->spinlock);
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+ if (sys->policy == IPA_POLICY_NOINTR_MODE) {
+ do {
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ IPADBG("sps_get_iovec failed %d\n", ret);
+ break;
+ }
+ if ((iov.addr == 0x0) && (iov.size == 0x0))
+ break;
+ } while (1);
+ }
+ result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
+ sps_flags);
+ if (result) {
+ IPAERR("sps_transfer_one failed rc=%d\n", result);
+ goto fail_sps_send;
+ }
+
+ spin_unlock_bh(&sys->spinlock);
+
+ return 0;
+
+fail_sps_send:
+ list_del(&tx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
+ dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
+fail_dma_map:
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+ return -EFAULT;
+}
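+
+/*
+ * Example of building a single data descriptor for ipa_send_one() (a
+ * sketch; tx_done and the skb handling are hypothetical, and the payload
+ * must be DMA-able):
+ *
+ * static void tx_done(void *user1, int user2)
+ * {
+ * dev_kfree_skb_any((struct sk_buff *)user1);
+ * }
+ * ...
+ * struct ipa_desc desc;
+ *
+ * memset(&desc, 0, sizeof(desc));
+ * desc.pyld = skb->data;
+ * desc.len = skb->len;
+ * desc.type = IPA_DATA_DESC_SKB;
+ * desc.callback = tx_done;
+ * desc.user1 = skb;
+ * if (ipa_send_one(sys, &desc, true))
+ * dev_kfree_skb_any(skb);
+ */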
+
+/**
+ * ipa_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send (may be immediate command or data)
+ * @in_atomic: whether caller is in atomic context
+ *
+ * This function is used for system-to-bam connection.
+ * - SPS driver expects struct sps_transfer which will contain all the data
+ * for a transaction
+ * - ipa_tx_pkt_wrapper will be used for each ipa
+ * descriptor (allocated from wrappers cache)
+ * - The wrapper struct will be configured for each ipa-desc payload and will
+ * contain information which will be later used by the user callbacks
+ * - each transfer will be made by calling sps_transfer()
+ * - Each packet (command or data) that will be sent will also be saved in
+ * ipa_sys_context for later check that all data was sent
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
+ bool in_atomic)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ struct ipa_tx_pkt_wrapper *next_pkt;
+ struct sps_transfer transfer = { 0 };
+ struct sps_iovec *iovec;
+ dma_addr_t dma_addr;
+ int i = 0;
+ int j;
+ int result;
+ int fail_dma_wrap = 0;
+ uint size = num_desc * sizeof(struct sps_iovec);
+ u32 mem_flag = GFP_ATOMIC;
+ struct sps_iovec iov;
+ int ret;
+
+ if (unlikely(!in_atomic))
+ mem_flag = GFP_KERNEL;
+
+ if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+ transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag,
+ &dma_addr);
+ if (!transfer.iovec) {
+ IPAERR("fail to alloc dma mem for sps xfr buff\n");
+ return -EFAULT;
+ }
+ } else {
+ transfer.iovec = kmalloc(size, mem_flag);
+ if (!transfer.iovec) {
+ IPAERR("fail to alloc mem for sps xfr buff ");
+ IPAERR("num_desc = %d size = %d\n", num_desc, size);
+ return -EFAULT;
+ }
+ dma_addr = dma_map_single(ipa_ctx->pdev,
+ transfer.iovec, size, DMA_TO_DEVICE);
+ if (!dma_addr) {
+ IPAERR("dma_map_single failed for sps xfr buff\n");
+ kfree(transfer.iovec);
+ return -EFAULT;
+ }
+ }
+
+ transfer.iovec_phys = dma_addr;
+ transfer.iovec_count = num_desc;
+ spin_lock_bh(&sys->spinlock);
+
+ for (i = 0; i < num_desc; i++) {
+ fail_dma_wrap = 0;
+ tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
+ mem_flag);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto failure;
+ }
+ /*
+ * first desc of set is "special" as it holds the count and
+ * other info
+ */
+ if (i == 0) {
+ transfer.user = tx_pkt;
+ tx_pkt->mult.phys_base = dma_addr;
+ tx_pkt->mult.base = transfer.iovec;
+ tx_pkt->mult.size = size;
+ tx_pkt->cnt = num_desc;
+ INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
+ }
+
+ iovec = &transfer.iovec[i];
+ iovec->flags = 0;
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+ tx_pkt->type = desc[i].type;
+
+ if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
+ tx_pkt->mem.base = desc[i].pyld;
+ tx_pkt->mem.size = desc[i].len;
+
+ if (!desc[i].dma_address_valid) {
+ tx_pkt->mem.phys_base =
+ dma_map_single(ipa_ctx->pdev,
+ tx_pkt->mem.base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ tx_pkt->mem.phys_base = desc[i].dma_address;
+ tx_pkt->no_unmap_dma = true;
+ }
+ } else {
+ tx_pkt->mem.base = desc[i].frag;
+ tx_pkt->mem.size = desc[i].len;
+
+ if (!desc[i].dma_address_valid) {
+ tx_pkt->mem.phys_base =
+ skb_frag_dma_map(ipa_ctx->pdev,
+ desc[i].frag,
+ 0, tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ tx_pkt->mem.phys_base = desc[i].dma_address;
+ tx_pkt->no_unmap_dma = true;
+ }
+ }
+
+ if (!tx_pkt->mem.phys_base) {
+ IPAERR("failed to alloc tx wrapper\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc[i].callback;
+ tx_pkt->user1 = desc[i].user1;
+ tx_pkt->user2 = desc[i].user2;
+
+ /*
+ * Point the iovec to the buffer and
+ * add this packet to system pipe context.
+ */
+ iovec->addr = tx_pkt->mem.phys_base;
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+
+ /*
+ * Special treatment for immediate commands, where the structure
+ * of the descriptor is different
+ */
+ if (desc[i].type == IPA_IMM_CMD_DESC) {
+ iovec->size = desc[i].opcode;
+ iovec->flags |= SPS_IOVEC_FLAG_IMME;
+ IPA_DUMP_BUFF(desc[i].pyld,
+ tx_pkt->mem.phys_base, desc[i].len);
+ } else {
+ iovec->size = desc[i].len;
+ }
+
+ if (i == (num_desc - 1)) {
+ iovec->flags |= SPS_IOVEC_FLAG_EOT;
+ /* "mark" the last desc */
+ tx_pkt->cnt = IPA_LAST_DESC_CNT;
+ }
+ }
+
+ if (sys->policy == IPA_POLICY_NOINTR_MODE) {
+ do {
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ IPADBG("sps_get_iovec failed %d\n", ret);
+ break;
+ }
+ if ((iov.addr == 0x0) && (iov.size == 0x0))
+ break;
+ } while (1);
+ }
+ result = sps_transfer(sys->ep->ep_hdl, &transfer);
+ if (result) {
+ IPAERR("sps_transfer failed rc=%d\n", result);
+ goto failure;
+ }
+
+ spin_unlock_bh(&sys->spinlock);
+ return 0;
+
+failure:
+ tx_pkt = transfer.user;
+ for (j = 0; j < i; j++) {
+ next_pkt = list_next_entry(tx_pkt, link);
+ list_del(&tx_pkt->link);
+ if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ }
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ tx_pkt = next_pkt;
+ }
+ if (j < num_desc)
+ /* last desc failed */
+ if (fail_dma_wrap)
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ if (transfer.iovec_phys) {
+ if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+ dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
+ transfer.iovec_phys);
+ } else {
+ dma_unmap_single(ipa_ctx->pdev, transfer.iovec_phys,
+ size, DMA_TO_DEVICE);
+ kfree(transfer.iovec);
+ }
+ }
+ spin_unlock_bh(&sys->spinlock);
+ return -EFAULT;
+}
+
+/**
+ * ipa_sps_irq_cmd_ack - callback function which will be called by SPS driver
+ * after an immediate command is complete.
+ * @user1: pointer to the descriptor of the transfer
+ * @user2: ignored by this callback
+ *
+ * Complete the immediate commands completion object, this will release the
+ * thread which waits on this completion object (ipa_send_cmd())
+ */
+static void ipa_sps_irq_cmd_ack(void *user1, int user2)
+{
+ struct ipa_desc *desc = (struct ipa_desc *)user1;
+
+ if (!desc) {
+ IPAERR("desc is NULL\n");
+ WARN_ON(1);
+ return;
+ }
+ IPADBG("got ack for cmd=%d\n", desc->opcode);
+ complete(&desc->xfer_done);
+}
+
+/**
+ * ipa_send_cmd - send immediate commands
+ * @num_desc: number of descriptors within the desc struct
+ * @descr: descriptor structure
+ *
+ * Function will block until the command gets an ACK from the IPA HW; the
+ * caller needs to free any resources it allocated after the function returns.
+ * The callback in ipa_desc should not be set by the caller
+ * for this function.
+ */
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
+{
+ struct ipa_desc *desc;
+ int result = 0;
+ struct ipa_sys_context *sys;
+ int ep_idx;
+
+ IPADBG("sending command\n");
+
+ ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+ if (-1 == ep_idx) {
+ IPAERR("Client %u is not mapped\n",
+ IPA_CLIENT_APPS_CMD_PROD);
+ return -EFAULT;
+ }
+ sys = ipa_ctx->ep[ep_idx].sys;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ if (num_desc == 1) {
+ init_completion(&descr->xfer_done);
+
+ if (descr->callback || descr->user1)
+ WARN_ON(1);
+
+ descr->callback = ipa_sps_irq_cmd_ack;
+ descr->user1 = descr;
+ if (ipa_send_one(sys, descr, true)) {
+ IPAERR("fail to send immediate command\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ wait_for_completion(&descr->xfer_done);
+ } else {
+ desc = &descr[num_desc - 1];
+ init_completion(&desc->xfer_done);
+
+ if (desc->callback || desc->user1)
+ WARN_ON(1);
+
+ desc->callback = ipa_sps_irq_cmd_ack;
+ desc->user1 = desc;
+ if (ipa_send(sys, num_desc, descr, true)) {
+ IPAERR("fail to send multiple immediate command set\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ wait_for_completion(&desc->xfer_done);
+ }
+
+bail:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
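+
+/*
+ * Example of issuing one immediate command (a sketch; the opcode and the
+ * DMA-able payload construction are elided, as they are command specific):
+ *
+ * struct ipa_desc desc;
+ *
+ * memset(&desc, 0, sizeof(desc));
+ * desc.opcode = cmd_opcode;
+ * desc.pyld = cmd_pyld;
+ * desc.len = cmd_len;
+ * desc.type = IPA_IMM_CMD_DESC;
+ * if (ipa_send_cmd(1, &desc))
+ * IPAERR("command failed\n");
+ */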
+
+/**
+ * ipa_sps_irq_tx_notify() - Callback function which will be called by
+ * the SPS driver to start a Tx poll operation.
+ * Called in an interrupt context.
+ * @notify: SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ */
+static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
+{
+ struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
+ int ret;
+
+ IPADBG("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+ atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
+ if (!atomic_read(&sys->curr_polling_state)) {
+ ret = sps_get_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
+ }
+ atomic_set(&sys->curr_polling_state, 1);
+ queue_work(sys->wq, &sys->work);
+ }
+ break;
+ default:
+ IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
+
+/**
+ * ipa_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Tx operation is complete.
+ * Called in an interrupt context.
+ * @notify: SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ * The event will be later handled by ipa_wq_write_done().
+ */
+static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+
+ IPADBG("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ tx_pkt = notify->data.transfer.user;
+ if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
+ atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
+ queue_work(tx_pkt->sys->wq, &tx_pkt->work);
+ break;
+ default:
+ IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
+
+/**
+ * ipa_poll_pkt() - Poll a packet from the SPS BAM
+ * Returns 0 to the caller on a successful poll,
+ * else -EIO
+ *
+ */
+static int ipa_poll_pkt(struct ipa_sys_context *sys,
+ struct sps_iovec *iov)
+{
+ int ret;
+
+ ret = sps_get_iovec(sys->ep->ep_hdl, iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ return ret;
+ }
+
+ if (iov->addr == 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ipa_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ * - Disconnect the packet from the system pipe linked list
+ * - Unmap the packet's skb, making it non-DMAable
+ * - Free the packet from the cache
+ * - Prepare a proper skb
+ * - Call the endpoints notify function, passing the skb in the parameters
+ * - Replenish the rx cache
+ */
+static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all,
+ bool in_poll_state)
+{
+ struct sps_iovec iov;
+ int ret;
+ int cnt = 0;
+
+ while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+ !atomic_read(&sys->curr_polling_state))) {
+ if (cnt && !process_all)
+ break;
+
+ ret = ipa_poll_pkt(sys, &iov);
+ if (ret)
+ break;
+
+ if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+ ipa_dma_memcpy_notify(sys, &iov);
+ else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+ ipa_wlan_wq_rx_common(sys, iov.size);
+ else
+ ipa_wq_rx_common(sys, iov.size);
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+/**
+ * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
+{
+ int ret;
+
+ if (!sys->ep || !sys->ep->valid) {
+ IPAERR("EP Not Valid, no need to cleanup.\n");
+ return;
+ }
+
+ ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ goto fail;
+ }
+
+ if (!atomic_read(&sys->curr_polling_state) &&
+ ((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
+ IPADBG("already in intr mode\n");
+ return;
+ }
+
+ if (!atomic_read(&sys->curr_polling_state)) {
+ IPAERR("already in intr mode\n");
+ goto fail;
+ }
+
+ sys->event.options = SPS_O_EOT;
+ ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+ if (ret) {
+ IPAERR("sps_register_event() failed %d\n", ret);
+ goto fail;
+ }
+ sys->ep->connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+ ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ goto fail;
+ }
+ atomic_set(&sys->curr_polling_state, 0);
+ if (!sys->ep->napi_enabled)
+ ipa_handle_rx_core(sys, true, false);
+ ipa_dec_release_wakelock(sys->ep->wakelock_client);
+ return;
+
+fail:
+ queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+ msecs_to_jiffies(1));
+}
+
+
+/**
+ * ipa_sps_irq_control() - Function to enable or disable BAM IRQ.
+ */
+static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable)
+{
+ int ret;
+
+ /*
+ * Do not change sps config in case we are in polling mode as this
+ * indicates that sps driver already notified EOT event and sps config
+ * should not change until ipa driver processes the packet.
+ */
+ if (atomic_read(&sys->curr_polling_state)) {
+ IPADBG("in polling mode, do not change config\n");
+ return;
+ }
+
+ if (enable) {
+ ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ return;
+ }
+ sys->event.options = SPS_O_EOT;
+ ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+ if (ret) {
+ IPAERR("sps_register_event() failed %d\n", ret);
+ return;
+ }
+ sys->ep->connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+ ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ return;
+ }
+ } else {
+ ret = sps_get_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ return;
+ }
+ sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ return;
+ }
+ }
+}
+
+void ipa_sps_irq_control_all(bool enable)
+{
+ struct ipa_ep_context *ep;
+ int ipa_ep_idx, client_num;
+
+ IPADBG("\n");
+
+ for (client_num = IPA_CLIENT_CONS;
+ client_num < IPA_CLIENT_MAX; client_num++) {
+ if (!IPA_CLIENT_IS_APPS_CONS(client_num))
+ continue;
+
+ ipa_ep_idx = ipa_get_ep_mapping(client_num);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ continue;
+ }
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+ if (!ep->valid) {
+ IPAERR("EP (%d) not allocated.\n", ipa_ep_idx);
+ continue;
+ }
+ ipa_sps_irq_control(ep->sys, enable);
+ }
+}
+
+/**
+ * ipa_sps_irq_rx_notify() - Callback function which is called by the SPS
+ * driver when a packet is received
+ * @notify: SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deferred using a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This prevents the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
+{
+ struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
+ int ret;
+
+ IPADBG("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+ atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
+
+ if (atomic_read(&sys->curr_polling_state)) {
+ sys->ep->eot_in_poll_err++;
+ break;
+ }
+
+ ret = sps_get_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
+ }
+ ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
+ atomic_set(&sys->curr_polling_state, 1);
+ trace_intr_to_poll(sys->ep->client);
+ queue_work(sys->wq, &sys->work);
+ break;
+ default:
+ IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
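+
+/*
+ * Summary of the interrupt <-> polling transitions implemented above
+ * (a sketch derived from the code in this file, added for clarity):
+ *
+ *   EOT IRQ while curr_polling_state == 0:
+ *	reconfigure the pipe to SPS_O_POLL, acquire the wakelock,
+ *	set curr_polling_state = 1 and queue sys->work.
+ *   The worker (or the NAPI poll) then drains the pipe; once it has been
+ *   idle for ipa_polling_iteration cycles or runs out of buffers:
+ *	ipa_rx_switch_to_intr_mode() restores SPS_O_EOT, re-registers the
+ *	event, releases the wakelock and clears curr_polling_state.
+ */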
+
+static void switch_to_intr_tx_work_func(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct ipa_sys_context *sys;
+
+ dwork = container_of(work, struct delayed_work, work);
+ sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
+ ipa_handle_tx(sys);
+}
+
+/**
+ * ipa_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @sys: system pipe context of the pipe being drained
+ *
+ * ipa_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+static void ipa_handle_rx(struct ipa_sys_context *sys)
+{
+ int inactive_cycles = 0;
+ int cnt;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ do {
+ cnt = ipa_handle_rx_core(sys, true, true);
+ if (cnt == 0) {
+ inactive_cycles++;
+ trace_idle_sleep_enter(sys->ep->client);
+ usleep_range(ipa_ctx->ipa_rx_min_timeout_usec,
+ ipa_ctx->ipa_rx_max_timeout_usec);
+ trace_idle_sleep_exit(sys->ep->client);
+ } else {
+ inactive_cycles = 0;
+ }
+
+ /* if pipe is out of buffers there is no point polling for
+ * completed descs; release the worker so delayed work can
+ * run in a timely manner
+ */
+ if (sys->len == 0)
+ break;
+
+ } while (inactive_cycles <= ipa_ctx->ipa_polling_iteration);
+
+ trace_poll_to_intr(sys->ep->client);
+ ipa_rx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa2_rx_poll() - Poll rx packets from the IPA HW. This
+ * function is executed in softirq context.
+ * @clnt_hdl: client handle obtained from ipa2_setup_sys_pipe()
+ * @weight: NAPI polling budget
+ *
+ * If no packets were polled, the driver counts an inactive cycle and
+ * eventually schedules the switch back to interrupt mode.
+ *
+ * Returns: the number of polled packets; 0 on error
+ */
+int ipa2_rx_poll(u32 clnt_hdl, int weight)
+{
+ struct ipa_ep_context *ep;
+ int ret;
+ int cnt = 0;
+ unsigned int delay = 1;
+ struct sps_iovec iov;
+
+ IPADBG("\n");
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm 0x%x\n", clnt_hdl);
+ return cnt;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+ while (cnt < weight &&
+ atomic_read(&ep->sys->curr_polling_state)) {
+
+ ret = ipa_poll_pkt(ep->sys, &iov);
+ if (ret)
+ break;
+
+ ipa_wq_rx_common(ep->sys, iov.size);
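+		/*
+		 * each completed transfer is counted with a fixed weight of
+		 * 5 against the NAPI budget, presumably because one
+		 * aggregated buffer can carry several packets (note added
+		 * for clarity; the constant itself is unchanged)
+		 */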
+ cnt += 5;
+	}
+
+ if (cnt == 0) {
+ ep->inactive_cycles++;
+ ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
+
+ if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
+ ep->switch_to_intr = true;
+ delay = 0;
+ }
+ queue_delayed_work(ep->sys->wq,
+ &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
+ } else
+ ep->inactive_cycles = 0;
+
+ return cnt;
+}
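+
+/*
+ * Illustrative NAPI usage (a sketch with a hypothetical client driver;
+ * only the ipa_rx_poll() API wrapper and the handle semantics are real):
+ *
+ *	static int client_napi_poll(struct napi_struct *napi, int budget)
+ *	{
+ *		struct client_ctx *ctx = container_of(napi,
+ *				struct client_ctx, napi);
+ *		int cnt = ipa_rx_poll(ctx->ipa_wan_hdl, budget);
+ *
+ *		if (cnt < budget)
+ *			napi_complete(napi);
+ *		return cnt;
+ *	}
+ */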
+
+static void switch_to_intr_rx_work_func(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct ipa_sys_context *sys;
+
+ dwork = container_of(work, struct delayed_work, work);
+ sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
+
+ if (sys->ep->napi_enabled) {
+ if (sys->ep->switch_to_intr) {
+ ipa_rx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+ sys->ep->switch_to_intr = false;
+ sys->ep->inactive_cycles = 0;
+ } else
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa_handle_rx(sys);
+}
+
+/**
+ * ipa_update_repl_threshold() - Update the rx replenish threshold for a client
+ * @ipa_client: client whose threshold should be recomputed
+ *
+ * Return value: None.
+ */
+void ipa_update_repl_threshold(enum ipa_client_type ipa_client)
+{
+ int ep_idx;
+ struct ipa_ep_context *ep;
+
+ /* Check if ep is valid. */
+ ep_idx = ipa2_get_ep_mapping(ipa_client);
+ if (ep_idx == -1) {
+ IPADBG("Invalid IPA client\n");
+ return;
+ }
+
+ ep = &ipa_ctx->ep[ep_idx];
+ if (!ep->valid) {
+ IPADBG("EP not valid/Not applicable for client.\n");
+ return;
+ }
+ /*
+ * Determine how many buffers/descriptors remaining will
+ * cause to drop below the yellow WM bar.
+ */
+ ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
+ / ep->sys->rx_buff_sz;
+}
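+
+/*
+ * Worked example of the computation above, with illustrative numbers: if
+ * ipa_get_sys_yellow_wm() reports 32768 bytes and rx_buff_sz is 2048,
+ * rx_replenish_threshold becomes 16, i.e. the replenish work is scheduled
+ * once 16 or fewer rx buffers remain queued to HW.
+ */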
+
+/**
+ * ipa2_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in: [in] input needed to setup BAM pipe and configure EP
+ * @clnt_hdl: [out] client handle
+ *
+ * - configure the end-point registers with the supplied
+ * parameters from the user.
+ * - call SPS APIs to create a system-to-bam connection with IPA.
+ * - allocate descriptor FIFO
+ * - register a callback function (ipa_sps_irq_rx_notify or
+ *   ipa_sps_irq_tx_notify - depending on client type) in case the driver is
+ *   not configured to polling mode
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+ struct ipa_ep_context *ep;
+ int ipa_ep_idx;
+ int result = -EINVAL;
+ dma_addr_t dma_addr;
+ char buff[IPA_RESOURCE_NAME_MAX];
+ struct iommu_domain *smmu_domain;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (sys_in == NULL || clnt_hdl == NULL) {
+ IPAERR("NULL args\n");
+ goto fail_gen;
+ }
+
+ if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+ IPAERR("bad parm client:%d fifo_sz:%d\n",
+ sys_in->client, sys_in->desc_fifo_sz);
+ goto fail_gen;
+ }
+
+ ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ goto fail_gen;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+ if (ep->valid == 1) {
+ if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+ IPAERR("EP already allocated.\n");
+ goto fail_and_disable_clocks;
+ } else {
+ if (ipa2_cfg_ep_hdr(ipa_ep_idx,
+ &sys_in->ipa_ep_cfg.hdr)) {
+ IPAERR("fail to configure hdr prop of EP.\n");
+ result = -EFAULT;
+ goto fail_and_disable_clocks;
+ }
+ if (ipa2_cfg_ep_cfg(ipa_ep_idx,
+ &sys_in->ipa_ep_cfg.cfg)) {
+ IPAERR("fail to configure cfg prop of EP.\n");
+ result = -EFAULT;
+ goto fail_and_disable_clocks;
+ }
+ IPADBG("client %d (ep: %d) overlay ok sys=%p\n",
+ sys_in->client, ipa_ep_idx, ep->sys);
+ ep->client_notify = sys_in->notify;
+ ep->priv = sys_in->priv;
+ *clnt_hdl = ipa_ep_idx;
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+ return 0;
+ }
+ }
+
+ memset(ep, 0, offsetof(struct ipa_ep_context, sys));
+
+ if (!ep->sys) {
+ ep->sys = kzalloc(sizeof(struct ipa_sys_context), GFP_KERNEL);
+ if (!ep->sys) {
+			IPAERR("failed to alloc sys ctx for client %d\n",
+ sys_in->client);
+ result = -ENOMEM;
+ goto fail_and_disable_clocks;
+ }
+
+ ep->sys->ep = ep;
+ snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
+ sys_in->client);
+ ep->sys->wq = alloc_workqueue(buff,
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ if (!ep->sys->wq) {
+ IPAERR("failed to create wq for client %d\n",
+ sys_in->client);
+ result = -EFAULT;
+ goto fail_wq;
+ }
+
+ snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
+ sys_in->client);
+ ep->sys->repl_wq = alloc_workqueue(buff,
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ if (!ep->sys->repl_wq) {
+ IPAERR("failed to create rep wq for client %d\n",
+ sys_in->client);
+ result = -EFAULT;
+ goto fail_wq2;
+ }
+
+ INIT_LIST_HEAD(&ep->sys->head_desc_list);
+ INIT_LIST_HEAD(&ep->sys->rcycl_list);
+ spin_lock_init(&ep->sys->spinlock);
+ } else {
+ memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep));
+ }
+
+ ep->skip_ep_cfg = sys_in->skip_ep_cfg;
+ if (ipa_assign_policy(sys_in, ep->sys)) {
+		IPAERR("failed to assign policy for client %d\n",
+			sys_in->client);
+ result = -ENOMEM;
+ goto fail_gen2;
+ }
+
+ ep->valid = 1;
+ ep->client = sys_in->client;
+ ep->client_notify = sys_in->notify;
+ ep->napi_enabled = sys_in->napi_enabled;
+ ep->priv = sys_in->priv;
+ ep->keep_ipa_awake = sys_in->keep_ipa_awake;
+ atomic_set(&ep->avail_fifo_desc,
+ ((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1));
+
+ if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
+ ep->sys->status_stat == NULL) {
+ ep->sys->status_stat =
+ kzalloc(sizeof(struct ipa_status_stats), GFP_KERNEL);
+ if (!ep->sys->status_stat) {
+ IPAERR("no memory\n");
+ goto fail_gen2;
+ }
+ }
+
+ result = ipa_enable_data_path(ipa_ep_idx);
+ if (result) {
+		IPAERR("enable data path failed res=%d ep=%d.\n", result,
+				ipa_ep_idx);
+ goto fail_gen2;
+ }
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto fail_gen2;
+ }
+ if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+ IPAERR("fail to configure status of EP.\n");
+ goto fail_gen2;
+ }
+ IPADBG("ep configuration successful\n");
+ } else {
+ IPADBG("skipping ep configuration\n");
+ }
+
+ /* Default Config */
+ ep->ep_hdl = sps_alloc_endpoint();
+ if (ep->ep_hdl == NULL) {
+ IPAERR("SPS EP allocation failed.\n");
+ goto fail_gen2;
+ }
+
+ result = sps_get_config(ep->ep_hdl, &ep->connect);
+ if (result) {
+ IPAERR("fail to get config.\n");
+ goto fail_sps_cfg;
+ }
+
+ /* Specific Config */
+ if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+ ep->connect.mode = SPS_MODE_SRC;
+ ep->connect.destination = SPS_DEV_HANDLE_MEM;
+ ep->connect.source = ipa_ctx->bam_handle;
+ ep->connect.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+ ep->connect.src_pipe_index = ipa_ep_idx;
+ /*
+ * Determine how many buffers/descriptors remaining will
+ * cause to drop below the yellow WM bar.
+ */
+ ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
+ / ep->sys->rx_buff_sz;
+ /* Only when the WAN pipes are setup, actual threshold will
+ * be read from the register. So update LAN_CONS ep again with
+ * right value.
+ */
+ if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS)
+ ipa_update_repl_threshold(IPA_CLIENT_APPS_LAN_CONS);
+ } else {
+ ep->connect.mode = SPS_MODE_DEST;
+ ep->connect.source = SPS_DEV_HANDLE_MEM;
+ ep->connect.destination = ipa_ctx->bam_handle;
+ ep->connect.src_pipe_index = ipa_ctx->a5_pipe_index++;
+ ep->connect.dest_pipe_index = ipa_ep_idx;
+ }
+
+	IPADBG("client:%d ep:%d dest_pipe_index:%d src_pipe_index:%d\n",
+		sys_in->client, ipa_ep_idx,
+		ep->connect.dest_pipe_index,
+		ep->connect.src_pipe_index);
+
+ ep->connect.options = ep->sys->sps_option;
+ ep->connect.desc.size = sys_in->desc_fifo_sz;
+	ep->connect.desc.base = dma_alloc_coherent(ipa_ctx->pdev,
+			ep->connect.desc.size, &dma_addr, GFP_KERNEL);
+	if (ep->connect.desc.base == NULL) {
+		IPAERR("fail to get DMA desc memory.\n");
+		goto fail_sps_cfg;
+	}
+	/* only translate the handle once the allocation is known good */
+	if (ipa_ctx->smmu_s1_bypass) {
+		ep->connect.desc.phys_base = dma_addr;
+	} else {
+		ep->connect.desc.iova = dma_addr;
+		smmu_domain = ipa2_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			ep->connect.desc.phys_base =
+				iommu_iova_to_phys(smmu_domain, dma_addr);
+		}
+	}
+
+ ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+ result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, sys_in->client);
+ if (result) {
+ IPAERR("sps_connect fails.\n");
+ goto fail_sps_connect;
+ }
+
+ ep->sys->event.options = SPS_O_EOT;
+ ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
+ ep->sys->event.xfer_done = NULL;
+ ep->sys->event.user = ep->sys;
+ ep->sys->event.callback = ep->sys->sps_callback;
+ result = sps_register_event(ep->ep_hdl, &ep->sys->event);
+ if (result < 0) {
+ IPAERR("register event error %d\n", result);
+ goto fail_register_event;
+ }
+
+ *clnt_hdl = ipa_ep_idx;
+
+ if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) {
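+		/*
+		 * capacity is rx_pool_sz + 1: one ring slot is kept empty so
+		 * that head_idx == tail_idx unambiguously means "empty"
+		 */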
+ ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
+		ep->sys->repl.cache = kcalloc(ep->sys->repl.capacity,
+				sizeof(void *), GFP_KERNEL);
+ if (!ep->sys->repl.cache) {
+ IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
+ ep->sys->repl_hdlr = ipa_replenish_rx_cache;
+ ep->sys->repl.capacity = 0;
+ } else {
+ atomic_set(&ep->sys->repl.head_idx, 0);
+ atomic_set(&ep->sys->repl.tail_idx, 0);
+ ipa_wq_repl_rx(&ep->sys->repl_work);
+ }
+ }
+
+ if (IPA_CLIENT_IS_CONS(sys_in->client))
+ ipa_replenish_rx_cache(ep->sys);
+
+ if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
+ ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
+ atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
+ }
+
+ ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
+ if (ipa_ctx->modem_cfg_emb_pipe_flt &&
+ sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+ IPADBG("modem cfg emb pipe flt\n");
+ else
+ ipa_install_dflt_flt_rules(ipa_ep_idx);
+ }
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+ IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+ ipa_ep_idx, ep->sys);
+
+ return 0;
+
+fail_register_event:
+ sps_disconnect(ep->ep_hdl);
+fail_sps_connect:
+ dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+fail_sps_cfg:
+ sps_free_endpoint(ep->ep_hdl);
+fail_gen2:
+ destroy_workqueue(ep->sys->repl_wq);
+fail_wq2:
+ destroy_workqueue(ep->sys->wq);
+fail_wq:
+ kfree(ep->sys);
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail_and_disable_clocks:
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+ return result;
+}
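+
+/*
+ * Illustrative pipe setup (a sketch; the callback, priv pointer and FIFO
+ * size are hypothetical, the ipa_setup_sys_pipe() API wrapper is real):
+ *
+ *	struct ipa_sys_connect_params sys_in;
+ *	u32 hdl;
+ *
+ *	memset(&sys_in, 0, sizeof(sys_in));
+ *	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+ *	sys_in.desc_fifo_sz = 0x800;
+ *	sys_in.notify = client_rx_cb;
+ *	sys_in.priv = client_ctx;
+ *	if (ipa_setup_sys_pipe(&sys_in, &hdl))
+ *		goto fail;
+ */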
+
+/**
+ * ipa2_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl: [in] the handle obtained from ipa2_setup_sys_pipe
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_teardown_sys_pipe(u32 clnt_hdl)
+{
+ struct ipa_ep_context *ep;
+ int empty;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_disable_data_path(clnt_hdl);
+ if (ep->napi_enabled) {
+ ep->switch_to_intr = true;
+ do {
+ usleep_range(95, 105);
+ } while (atomic_read(&ep->sys->curr_polling_state));
+ }
+
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ do {
+ spin_lock_bh(&ep->sys->spinlock);
+ empty = list_empty(&ep->sys->head_desc_list);
+ spin_unlock_bh(&ep->sys->spinlock);
+ if (!empty)
+ usleep_range(95, 105);
+ else
+ break;
+ } while (1);
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+ cancel_delayed_work_sync(&ep->sys->switch_to_intr_work);
+ }
+
+ flush_workqueue(ep->sys->wq);
+ sps_disconnect(ep->ep_hdl);
+ dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ sps_free_endpoint(ep->ep_hdl);
+ if (ep->sys->repl_wq)
+ flush_workqueue(ep->sys->repl_wq);
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ ipa_cleanup_rx(ep->sys);
+
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
+ if (ipa_ctx->modem_cfg_emb_pipe_flt &&
+ ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+ IPADBG("modem cfg emb pipe flt\n");
+ else
+ ipa_delete_dflt_flt_rules(clnt_hdl);
+ }
+
+ if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
+ atomic_dec(&ipa_ctx->wc_memb.active_clnt_cnt);
+
+ memset(&ep->wstats, 0, sizeof(struct ipa_wlan_stats));
+
+ if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt))
+ ipa_cleanup_wlan_rx_common_cache();
+
+ ep->valid = 0;
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+ return 0;
+}
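+
+/*
+ * Note: every successful ipa_setup_sys_pipe() call is expected to be
+ * paired with a teardown using the returned handle, e.g.
+ *
+ *	ipa_teardown_sys_pipe(hdl);
+ */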
+
+/**
+ * ipa_tx_comp_usr_notify_release() - Callback function which will call the
+ * user supplied callback function to release the skb, or release it on
+ * its own if no callback function was supplied.
+ * @user1: the skb to release
+ * @user2: IPA endpoint index associated with the completion
+ *
+ * This notified callback is for the destination client.
+ * This function is supplied in ipa_connect.
+ */
+static void ipa_tx_comp_usr_notify_release(void *user1, int user2)
+{
+ struct sk_buff *skb = (struct sk_buff *)user1;
+ int ep_idx = user2;
+
+ IPADBG("skb=%p ep=%d\n", skb, ep_idx);
+
+ IPA_STATS_INC_CNT(ipa_ctx->stats.tx_pkts_compl);
+
+ if (ipa_ctx->ep[ep_idx].client_notify)
+ ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
+ IPA_WRITE_DONE, (unsigned long)skb);
+ else
+ dev_kfree_skb_any(skb);
+}
+
+static void ipa_tx_cmd_comp(void *user1, int user2)
+{
+ kfree(user1);
+}
+
+/**
+ * ipa2_tx_dp() - Data-path tx handler
+ * @dst: [in] which IPA destination to route tx packets to
+ * @skb: [in] the packet to send
+ * @metadata: [in] TX packet meta-data
+ *
+ * Data-path tx handler. It is used both for the SW data-path, which bypasses
+ * most IPA HW blocks, and for the regular HW data-path (WLAN AMPDU traffic
+ * only). If dst is a "valid" CONS type, the SW data-path is used. If dst is
+ * the WLAN_AMPDU PROD type, the HW data-path for WLAN AMPDU is used. Anything
+ * else is an error. On error, the client needs to free the skb as needed. On
+ * success, the IPA driver will later invoke the client callback if one was
+ * supplied; that callback should free the skb. If no callback was supplied,
+ * the IPA driver will free the skb internally.
+ *
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send is done, from the SPS point of view, the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp will call to the user supplied
+ * callback (from ipa_connect)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *meta)
+{
+ struct ipa_desc *desc;
+ struct ipa_desc _desc[2];
+ int dst_ep_idx;
+ struct ipa_ip_packet_init *cmd;
+ struct ipa_sys_context *sys;
+ int src_ep_idx;
+ int num_frags, f;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (skb->len == 0) {
+ IPAERR("packet size is 0\n");
+ return -EINVAL;
+ }
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ if (num_frags) {
+ /* 1 desc is needed for the linear portion of skb;
+ * 1 desc may be needed for the PACKET_INIT;
+ * 1 desc for each frag
+ */
+		desc = kcalloc(num_frags + 2, sizeof(*desc), GFP_ATOMIC);
+ if (!desc) {
+ IPAERR("failed to alloc desc array\n");
+ goto fail_mem;
+ }
+ } else {
+ memset(_desc, 0, 2 * sizeof(struct ipa_desc));
+ desc = &_desc[0];
+ }
+
+ /*
+ * USB_CONS: PKT_INIT ep_idx = dst pipe
+ * Q6_CONS: PKT_INIT ep_idx = sender pipe
+ * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
+ *
+ * LAN TX: all PKT_INIT
+ * WAN TX: PKT_INIT (cmd) + HW (data)
+ *
+ */
+ if (IPA_CLIENT_IS_CONS(dst)) {
+ src_ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+ if (-1 == src_ep_idx) {
+ IPAERR("Client %u is not mapped\n",
+ IPA_CLIENT_APPS_LAN_WAN_PROD);
+ goto fail_gen;
+ }
+ dst_ep_idx = ipa2_get_ep_mapping(dst);
+ } else {
+ src_ep_idx = ipa2_get_ep_mapping(dst);
+ if (-1 == src_ep_idx) {
+ IPAERR("Client %u is not mapped\n", dst);
+ goto fail_gen;
+ }
+ if (meta && meta->pkt_init_dst_ep_valid)
+ dst_ep_idx = meta->pkt_init_dst_ep;
+ else
+ dst_ep_idx = -1;
+ }
+
+	sys = ipa_ctx->ep[src_ep_idx].sys;
+
+	if (unlikely(!sys || !sys->ep->valid)) {
+		IPAERR("pipe not valid\n");
+		goto fail_gen;
+	}
+
+ if (dst_ep_idx != -1) {
+ /* SW data path */
+ cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_gen;
+ }
+
+ cmd->destination_pipe_index = dst_ep_idx;
+ desc[0].opcode = IPA_IP_PACKET_INIT;
+ desc[0].pyld = cmd;
+ desc[0].len = sizeof(struct ipa_ip_packet_init);
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].callback = ipa_tx_cmd_comp;
+ desc[0].user1 = cmd;
+ desc[1].pyld = skb->data;
+ desc[1].len = skb_headlen(skb);
+ desc[1].type = IPA_DATA_DESC_SKB;
+ desc[1].callback = ipa_tx_comp_usr_notify_release;
+ desc[1].user1 = skb;
+ desc[1].user2 = (meta && meta->pkt_init_dst_ep_valid &&
+ meta->pkt_init_dst_ep_remote) ?
+ src_ep_idx :
+ dst_ep_idx;
+ if (meta && meta->dma_address_valid) {
+ desc[1].dma_address_valid = true;
+ desc[1].dma_address = meta->dma_address;
+ }
+
+ for (f = 0; f < num_frags; f++) {
+ desc[2+f].frag = &skb_shinfo(skb)->frags[f];
+ desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
+ desc[2+f].len = skb_frag_size(desc[2+f].frag);
+ }
+
+ /* don't free skb till frag mappings are released */
+ if (num_frags) {
+ desc[2+f-1].callback = desc[1].callback;
+ desc[2+f-1].user1 = desc[1].user1;
+ desc[2+f-1].user2 = desc[1].user2;
+ desc[1].callback = NULL;
+ }
+
+ if (ipa_send(sys, num_frags + 2, desc, true)) {
+ IPAERR("fail to send skb %p num_frags %u SWP\n",
+ skb, num_frags);
+ goto fail_send;
+ }
+ IPA_STATS_INC_CNT(ipa_ctx->stats.tx_sw_pkts);
+ } else {
+ /* HW data path */
+ desc[0].pyld = skb->data;
+ desc[0].len = skb_headlen(skb);
+ desc[0].type = IPA_DATA_DESC_SKB;
+ desc[0].callback = ipa_tx_comp_usr_notify_release;
+ desc[0].user1 = skb;
+ desc[0].user2 = src_ep_idx;
+
+ if (meta && meta->dma_address_valid) {
+ desc[0].dma_address_valid = true;
+ desc[0].dma_address = meta->dma_address;
+ }
+
+ if (num_frags == 0) {
+ if (ipa_send_one(sys, desc, true)) {
+ IPAERR("fail to send skb %p HWP\n", skb);
+ goto fail_gen;
+ }
+ } else {
+ for (f = 0; f < num_frags; f++) {
+ desc[1+f].frag = &skb_shinfo(skb)->frags[f];
+ desc[1+f].type = IPA_DATA_DESC_SKB_PAGED;
+ desc[1+f].len = skb_frag_size(desc[1+f].frag);
+ }
+
+ /* don't free skb till frag mappings are released */
+ desc[1+f-1].callback = desc[0].callback;
+ desc[1+f-1].user1 = desc[0].user1;
+ desc[1+f-1].user2 = desc[0].user2;
+ desc[0].callback = NULL;
+
+ if (ipa_send(sys, num_frags + 1, desc, true)) {
+ IPAERR("fail to send skb %p num_frags %u HWP\n",
+ skb, num_frags);
+ goto fail_gen;
+ }
+ }
+
+ IPA_STATS_INC_CNT(ipa_ctx->stats.tx_hw_pkts);
+ }
+
+ if (num_frags) {
+ kfree(desc);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.tx_non_linear);
+ }
+
+ return 0;
+
+fail_send:
+ kfree(cmd);
+fail_gen:
+ if (num_frags)
+ kfree(desc);
+fail_mem:
+ return -EFAULT;
+}
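+
+/*
+ * Illustrative SW data-path send (a sketch; the skb variable is
+ * hypothetical, ipa_tx_dp() is the real API wrapper for this handler):
+ *
+ *	if (ipa_tx_dp(IPA_CLIENT_APPS_LAN_CONS, skb, NULL))
+ *		dev_kfree_skb_any(skb);
+ *
+ * On failure the caller still owns (and here frees) the skb; on success
+ * the skb is released by the notify callback registered at pipe setup
+ * time, or internally when no callback was registered.
+ */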
+
+static void ipa_wq_handle_rx(struct work_struct *work)
+{
+ struct ipa_sys_context *sys;
+
+ sys = container_of(work, struct ipa_sys_context, work);
+
+ if (sys->ep->napi_enabled) {
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa_handle_rx(sys);
+}
+
+static void ipa_wq_repl_rx(struct work_struct *work)
+{
+ struct ipa_sys_context *sys;
+ void *ptr;
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ u32 next;
+ u32 curr;
+
+ sys = container_of(work, struct ipa_sys_context, repl_work);
+ curr = atomic_read(&sys->repl.tail_idx);
+
+begin:
+ while (1) {
+ next = (curr + 1) % sys->repl.capacity;
+ if (next == atomic_read(&sys->repl.head_idx))
+ goto fail_kmem_cache_alloc;
+
+ rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+ flag);
+ if (!rx_pkt) {
+ pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
+ __func__, sys);
+ goto fail_kmem_cache_alloc;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+ rx_pkt->sys = sys;
+
+ rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+ if (rx_pkt->data.skb == NULL) {
+ pr_err_ratelimited("%s fail alloc skb sys=%p\n",
+ __func__, sys);
+ goto fail_skb_alloc;
+ }
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
+ sys->rx_buff_sz,
+ DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
+ __func__, (void *)rx_pkt->data.dma_addr,
+ ptr, sys);
+ goto fail_dma_mapping;
+ }
+
+ sys->repl.cache[curr] = rx_pkt;
+ curr = next;
+ /* ensure write is done before setting tail index */
+ mb();
+ atomic_set(&sys->repl.tail_idx, next);
+ }
+
+ return;
+
+fail_dma_mapping:
+ sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+ if (atomic_read(&sys->repl.tail_idx) ==
+ atomic_read(&sys->repl.head_idx)) {
+ if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+ IPA_STATS_INC_CNT(ipa_ctx->stats.wan_repl_rx_empty);
+ else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+ IPA_STATS_INC_CNT(ipa_ctx->stats.lan_repl_rx_empty);
+ else
+ WARN_ON(1);
+ pr_err_ratelimited("%s sys=%p repl ring empty\n",
+ __func__, sys);
+ goto begin;
+ }
+}
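+
+/*
+ * Note on the replenish ring above: ipa_wq_repl_rx() is the only producer
+ * (it advances tail_idx) and ipa_fast_replenish_rx_cache() is the only
+ * consumer (it advances head_idx), so each index is written by exactly one
+ * context and the mb() before each atomic_set() publishes the cache slot
+ * contents to the other side.
+ */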
+
+static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt = NULL;
+ struct ipa_rx_pkt_wrapper *tmp;
+ int ret;
+ u32 rx_len_cached = 0;
+
+ IPADBG("\n");
+
+ spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+ rx_len_cached = sys->len;
+
+ if (rx_len_cached < sys->rx_pool_sz) {
+ list_for_each_entry_safe(rx_pkt, tmp,
+ &ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
+ list_del(&rx_pkt->link);
+
+ if (ipa_ctx->wc_memb.wlan_comm_free_cnt > 0)
+ ipa_ctx->wc_memb.wlan_comm_free_cnt--;
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ rx_pkt->len = 0;
+ rx_pkt->sys = sys;
+
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr,
+ IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_sps_transfer;
+ }
+
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ rx_len_cached = ++sys->len;
+
+ if (rx_len_cached >= sys->rx_pool_sz) {
+ spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+ return;
+ }
+ }
+ }
+ spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+
+ if (rx_len_cached < sys->rx_pool_sz &&
+ ipa_ctx->wc_memb.wlan_comm_total_cnt <
+ IPA_WLAN_COMM_RX_POOL_HIGH) {
+ ipa_replenish_rx_cache(sys);
+ ipa_ctx->wc_memb.wlan_comm_total_cnt +=
+ (sys->rx_pool_sz - rx_len_cached);
+ }
+
+ return;
+
+fail_sps_transfer:
+ list_del(&rx_pkt->link);
+ spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+}
+
+static void ipa_cleanup_wlan_rx_common_cache(void)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ struct ipa_rx_pkt_wrapper *tmp;
+
+ list_for_each_entry_safe(rx_pkt, tmp,
+ &ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
+ list_del(&rx_pkt->link);
+		/* unmap with the same size used at map time */
+		dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+				IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_pkt->data.skb);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ ipa_ctx->wc_memb.wlan_comm_free_cnt--;
+ ipa_ctx->wc_memb.wlan_comm_total_cnt--;
+ }
+ ipa_ctx->wc_memb.total_tx_pkts_freed = 0;
+
+ if (ipa_ctx->wc_memb.wlan_comm_free_cnt != 0)
+ IPAERR("wlan comm buff free cnt: %d\n",
+ ipa_ctx->wc_memb.wlan_comm_free_cnt);
+
+ if (ipa_ctx->wc_memb.wlan_comm_total_cnt != 0)
+ IPAERR("wlan comm buff total cnt: %d\n",
+ ipa_ctx->wc_memb.wlan_comm_total_cnt);
+}
+
+static void ipa_alloc_wlan_rx_common_cache(u32 size)
+{
+ void *ptr;
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ int rx_len_cached = 0;
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
+ (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+
+ rx_len_cached = ipa_ctx->wc_memb.wlan_comm_total_cnt;
+ while (rx_len_cached < size) {
+ rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+ flag);
+ if (!rx_pkt) {
+ IPAERR("failed to alloc rx wrapper\n");
+ goto fail_kmem_cache_alloc;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+
+ rx_pkt->data.skb =
+ ipa_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
+ flag);
+ if (rx_pkt->data.skb == NULL) {
+ IPAERR("failed to alloc skb\n");
+ goto fail_skb_alloc;
+ }
+ ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+ rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
+ IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->data.dma_addr, ptr);
+ goto fail_dma_mapping;
+ }
+
+ list_add_tail(&rx_pkt->link,
+ &ipa_ctx->wc_memb.wlan_comm_desc_list);
+ rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt;
+
+		ipa_ctx->wc_memb.wlan_comm_free_cnt++;
+	}
+
+ return;
+
+fail_dma_mapping:
+ dev_kfree_skb_any(rx_pkt->data.skb);
+fail_skb_alloc:
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+ return;
+}
+
+/**
+ * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
+ * @sys: system pipe context to replenish
+ *
+ * The function allocates buffers from the rx_pkt_wrapper_cache cache until
+ * there are sys->rx_pool_sz buffers in the cache.
+ *   - Allocate a packet wrapper from the cache
+ *   - Initialize the packet's link
+ *   - Initialize the packet's work struct
+ *   - Allocate the packet's socket buffer (skb)
+ *   - Extend the skb to the full rx buffer size
+ *   - Make the packet DMAable
+ *   - Add the packet to the system pipe linked list
+ *   - Initiate an SPS transfer so that the SPS driver will use this packet
+ *     later
+ */
+static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
+{
+ void *ptr;
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached = 0;
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
+ (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+
+ rx_len_cached = sys->len;
+
+ while (rx_len_cached < sys->rx_pool_sz) {
+ rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+ flag);
+ if (!rx_pkt) {
+ IPAERR("failed to alloc rx wrapper\n");
+ goto fail_kmem_cache_alloc;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+ rx_pkt->sys = sys;
+
+ rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+ if (rx_pkt->data.skb == NULL) {
+ IPAERR("failed to alloc skb\n");
+ goto fail_skb_alloc;
+ }
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
+ sys->rx_buff_sz,
+ DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->data.dma_addr, ptr);
+ goto fail_dma_mapping;
+ }
+
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ rx_len_cached = ++sys->len;
+
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_sps_transfer;
+ }
+ }
+
+ return;
+
+fail_sps_transfer:
+ list_del(&rx_pkt->link);
+ rx_len_cached = --sys->len;
+ dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+ sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+ if (rx_len_cached == 0)
+ queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+ msecs_to_jiffies(1));
+}
+
+static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
+{
+ void *ptr;
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached = 0;
+
+ rx_len_cached = sys->len;
+
+ while (rx_len_cached < sys->rx_pool_sz) {
+ spin_lock_bh(&sys->spinlock);
+ if (list_empty(&sys->rcycl_list))
+ goto fail_kmem_cache_alloc;
+
+ rx_pkt = list_first_entry(&sys->rcycl_list,
+ struct ipa_rx_pkt_wrapper, link);
+ list_del(&rx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
+ ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0)
+ goto fail_dma_mapping;
+
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ rx_len_cached = ++sys->len;
+
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_sps_transfer;
+ }
+ }
+
+ return;
+fail_sps_transfer:
+ rx_len_cached = --sys->len;
+ list_del(&rx_pkt->link);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+	spin_lock_bh(&sys->spinlock);
+	/* return the wrapper to the recycle list for a later retry; fall
+	 * through so the single unlock below releases either the lock taken
+	 * here or the one still held since the list_empty() check
+	 */
+	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
+fail_kmem_cache_alloc:
+	spin_unlock_bh(&sys->spinlock);
+ if (rx_len_cached == 0)
+ queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+ msecs_to_jiffies(1));
+}
+
+static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached = 0;
+ u32 curr;
+
+ rx_len_cached = sys->len;
+ curr = atomic_read(&sys->repl.head_idx);
+
+ while (rx_len_cached < sys->rx_pool_sz) {
+ if (curr == atomic_read(&sys->repl.tail_idx)) {
+ queue_work(sys->repl_wq, &sys->repl_work);
+ break;
+ }
+
+ rx_pkt = sys->repl.cache[curr];
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ list_del(&rx_pkt->link);
+ break;
+ }
+ rx_len_cached = ++sys->len;
+ sys->repl_trig_cnt++;
+ curr = (curr + 1) % sys->repl.capacity;
+ /* ensure write is done before setting head index */
+ mb();
+ atomic_set(&sys->repl.head_idx, curr);
+ }
+
+ if (sys->repl_trig_cnt % sys->repl_trig_thresh == 0)
+ queue_work(sys->repl_wq, &sys->repl_work);
+
+ if (rx_len_cached <= sys->ep->rx_replenish_threshold) {
+ if (rx_len_cached == 0) {
+ if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+ IPA_STATS_INC_CNT(ipa_ctx->stats.wan_rx_empty);
+ else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+ IPA_STATS_INC_CNT(ipa_ctx->stats.lan_rx_empty);
+ else
+ WARN_ON(1);
+ }
+ sys->repl_trig_cnt = 0;
+ queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+ msecs_to_jiffies(1));
+ }
+}
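+
+/*
+ * Note: with the defaults assigned in ipa_assign_policy_v2() below,
+ * repl_trig_thresh is rx_pool_sz / 8, so the repl worker is kicked about
+ * once for every eighth of the pool handed back to HW, immediately when
+ * the prefilled ring runs dry, and via delayed work once the pool drops
+ * to rx_replenish_threshold buffers or empties completely.
+ */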
+
+static void replenish_rx_work_func(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct ipa_sys_context *sys;
+
+ dwork = container_of(work, struct delayed_work, work);
+ sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work);
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ sys->repl_hdlr(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa_cleanup_rx() - release RX queue resources
+ * @sys: system pipe context whose RX resources should be freed
+ */
+static void ipa_cleanup_rx(struct ipa_sys_context *sys)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ struct ipa_rx_pkt_wrapper *r;
+ u32 head;
+ u32 tail;
+
+ list_for_each_entry_safe(rx_pkt, r,
+ &sys->head_desc_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ sys->free_skb(rx_pkt->data.skb);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ }
+
+ list_for_each_entry_safe(rx_pkt, r,
+ &sys->rcycl_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ sys->free_skb(rx_pkt->data.skb);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ }
+
+ if (sys->repl.cache) {
+ head = atomic_read(&sys->repl.head_idx);
+ tail = atomic_read(&sys->repl.tail_idx);
+ while (head != tail) {
+ rx_pkt = sys->repl.cache[head];
+ dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ sys->free_skb(rx_pkt->data.skb);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ head = (head + 1) % sys->repl.capacity;
+ }
+ kfree(sys->repl.cache);
+ }
+}
+
+static struct sk_buff *ipa_skb_copy_for_client(struct sk_buff *skb, int len)
+{
+ struct sk_buff *skb2 = NULL;
+
+ skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
+ if (likely(skb2)) {
+ /* Set the data pointer */
+ skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
+ memcpy(skb2->data, skb->data, len);
+ skb2->len = len;
+ skb_set_tail_pointer(skb2, len);
+ }
+
+ return skb2;
+}
+
+static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
+ struct ipa_sys_context *sys)
+{
+ int rc = 0;
+ struct ipa_hw_pkt_status *status;
+ struct sk_buff *skb2;
+ int pad_len_byte;
+ int len;
+ unsigned char *buf;
+ int src_pipe;
+ unsigned int used = *(unsigned int *)skb->cb;
+ unsigned int used_align = ALIGN(used, 32);
+ unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+
+ IPA_DUMP_BUFF(skb->data, 0, skb->len);
+
+ if (skb->len == 0) {
+ IPAERR("ZLT\n");
+ sys->free_skb(skb);
+ return rc;
+ }
+
+ if (sys->len_partial) {
+ IPADBG("len_partial %d\n", sys->len_partial);
+ buf = skb_push(skb, sys->len_partial);
+ memcpy(buf, sys->prev_skb->data, sys->len_partial);
+ sys->len_partial = 0;
+ sys->free_skb(sys->prev_skb);
+ sys->prev_skb = NULL;
+ goto begin;
+ }
+
+ /* this pipe has TX comp (status only) + mux-ed LAN RX data
+ * (status+data)
+ */
+ if (sys->len_rem) {
+ IPADBG("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
+ sys->len_pad);
+ if (sys->len_rem <= skb->len) {
+ if (sys->prev_skb) {
+ skb2 = skb_copy_expand(sys->prev_skb, 0,
+ sys->len_rem, GFP_KERNEL);
+ if (likely(skb2)) {
+ memcpy(skb_put(skb2, sys->len_rem),
+ skb->data, sys->len_rem);
+ skb_trim(skb2,
+ skb2->len - sys->len_pad);
+ skb2->truesize = skb2->len +
+ sizeof(struct sk_buff);
+ if (sys->drop_packet)
+ dev_kfree_skb_any(skb2);
+ else
+ sys->ep->client_notify(
+ sys->ep->priv,
+ IPA_RECEIVE,
+ (unsigned long)(skb2));
+ } else {
+ IPAERR("copy expand failed\n");
+ }
+ dev_kfree_skb_any(sys->prev_skb);
+ }
+ skb_pull(skb, sys->len_rem);
+ sys->prev_skb = NULL;
+ sys->len_rem = 0;
+ sys->len_pad = 0;
+ } else {
+ if (sys->prev_skb) {
+ skb2 = skb_copy_expand(sys->prev_skb, 0,
+ skb->len, GFP_KERNEL);
+ if (likely(skb2)) {
+ memcpy(skb_put(skb2, skb->len),
+ skb->data, skb->len);
+ } else {
+ IPAERR("copy expand failed\n");
+ }
+ dev_kfree_skb_any(sys->prev_skb);
+ sys->prev_skb = skb2;
+ }
+ sys->len_rem -= skb->len;
+ sys->free_skb(skb);
+ return rc;
+ }
+ }
+
+begin:
+ while (skb->len) {
+ sys->drop_packet = false;
+ IPADBG("LEN_REM %d\n", skb->len);
+
+ if (skb->len < IPA_PKT_STATUS_SIZE) {
+ WARN_ON(sys->prev_skb != NULL);
+ IPADBG("status straddles buffer\n");
+ sys->prev_skb = skb;
+ sys->len_partial = skb->len;
+ return rc;
+ }
+
+ status = (struct ipa_hw_pkt_status *)skb->data;
+ IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ status->status_opcode, status->endp_src_idx,
+ status->endp_dest_idx, status->pkt_len);
+ if (sys->status_stat) {
+ sys->status_stat->status[sys->status_stat->curr] =
+ *status;
+ sys->status_stat->curr++;
+ if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+ sys->status_stat->curr = 0;
+ }
+
+ if (status->status_opcode !=
+ IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
+ status->status_opcode !=
+ IPA_HW_STATUS_OPCODE_PACKET &&
+ status->status_opcode !=
+ IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET &&
+ status->status_opcode !=
+ IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
+ IPAERR("unsupported opcode(%d)\n",
+ status->status_opcode);
+ skb_pull(skb, IPA_PKT_STATUS_SIZE);
+ continue;
+ }
+ IPA_STATS_EXCP_CNT(status->exception,
+ ipa_ctx->stats.rx_excp_pkts);
+ if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
+ status->endp_src_idx >= ipa_ctx->ipa_num_pipes) {
+ IPAERR("status fields invalid\n");
+ IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ status->status_opcode, status->endp_src_idx,
+ status->endp_dest_idx, status->pkt_len);
+ WARN_ON(1);
+ BUG();
+ }
+ if (status->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID) {
+ struct ipa_tag_completion *comp;
+
+ IPADBG("TAG packet arrived\n");
+ if (status->tag_f_2 == IPA_COOKIE) {
+ skb_pull(skb, IPA_PKT_STATUS_SIZE);
+ if (skb->len < sizeof(comp)) {
+ IPAERR("TAG arrived without packet\n");
+ return rc;
+ }
+ memcpy(&comp, skb->data, sizeof(comp));
+ skb_pull(skb, sizeof(comp) +
+ IPA_SIZE_DL_CSUM_META_TRAILER);
+ complete(&comp->comp);
+ if (atomic_dec_return(&comp->cnt) == 0)
+ kfree(comp);
+ continue;
+ } else {
+ IPADBG("ignoring TAG with wrong cookie\n");
+ }
+ }
+ if (status->pkt_len == 0) {
+ IPADBG("Skip aggr close status\n");
+ skb_pull(skb, IPA_PKT_STATUS_SIZE);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.aggr_close);
+ IPA_STATS_DEC_CNT(
+ ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
+ continue;
+ }
+ if (status->endp_dest_idx == (sys->ep - ipa_ctx->ep)) {
+ /* RX data */
+ src_pipe = status->endp_src_idx;
+
+			/*
+			 * Drop a packet which was routed back to the AP
+			 * because no route matched it.
+			 */
+ if (!status->exception && !status->route_match)
+ sys->drop_packet = true;
+
+ if (skb->len == IPA_PKT_STATUS_SIZE &&
+ !status->exception) {
+ WARN_ON(sys->prev_skb != NULL);
+ IPADBG("Ins header in next buffer\n");
+ sys->prev_skb = skb;
+ sys->len_partial = skb->len;
+ return rc;
+ }
+
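+			/*
+			 * pad the payload length up to a 4-byte boundary,
+			 * e.g. pkt_len 61 yields pad_len_byte 3
+			 */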
+ pad_len_byte = ((status->pkt_len + 3) & ~3) -
+ status->pkt_len;
+
+ len = status->pkt_len + pad_len_byte +
+ IPA_SIZE_DL_CSUM_META_TRAILER;
+ IPADBG("pad %d pkt_len %d len %d\n", pad_len_byte,
+ status->pkt_len, len);
+
+ if (status->exception ==
+ IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) {
+ IPADBG("Dropping packet on DeAggr Exception\n");
+ sys->drop_packet = true;
+ }
+
+ skb2 = ipa_skb_copy_for_client(skb,
+ status->pkt_len + IPA_PKT_STATUS_SIZE);
+ if (likely(skb2)) {
+ if (skb->len < len + IPA_PKT_STATUS_SIZE) {
+ IPADBG("SPL skb len %d len %d\n",
+ skb->len, len);
+ sys->prev_skb = skb2;
+ sys->len_rem = len - skb->len +
+ IPA_PKT_STATUS_SIZE;
+ sys->len_pad = pad_len_byte;
+ skb_pull(skb, skb->len);
+ } else {
+ skb_trim(skb2, status->pkt_len +
+ IPA_PKT_STATUS_SIZE);
+ IPADBG("rx avail for %d\n",
+ status->endp_dest_idx);
+ if (sys->drop_packet) {
+ dev_kfree_skb_any(skb2);
+ } else if (status->pkt_len >
+ IPA_GENERIC_AGGR_BYTE_LIMIT *
+ 1024) {
+ IPAERR("packet size invalid\n");
+ IPAERR("STATUS opcode=%d\n",
+ status->status_opcode);
+ IPAERR("src=%d dst=%d len=%d\n",
+ status->endp_src_idx,
+ status->endp_dest_idx,
+ status->pkt_len);
+ BUG();
+ } else {
+ skb2->truesize = skb2->len +
+ sizeof(struct sk_buff) +
+ (ALIGN(len +
+ IPA_PKT_STATUS_SIZE, 32) *
+ unused / used_align);
+ sys->ep->client_notify(
+ sys->ep->priv,
+ IPA_RECEIVE,
+ (unsigned long)(skb2));
+ }
+ skb_pull(skb, len +
+ IPA_PKT_STATUS_SIZE);
+ }
+ } else {
+ IPAERR("fail to alloc skb\n");
+ if (skb->len < len) {
+ sys->prev_skb = NULL;
+ sys->len_rem = len - skb->len +
+ IPA_PKT_STATUS_SIZE;
+ sys->len_pad = pad_len_byte;
+ skb_pull(skb, skb->len);
+ } else {
+ skb_pull(skb, len +
+ IPA_PKT_STATUS_SIZE);
+ }
+ }
+ /* TX comp */
+ ipa_wq_write_done_status(src_pipe);
+ IPADBG("tx comp imp for %d\n", src_pipe);
+ } else {
+ /* TX comp */
+ ipa_wq_write_done_status(status->endp_src_idx);
+ IPADBG("tx comp exp for %d\n", status->endp_src_idx);
+ skb_pull(skb, IPA_PKT_STATUS_SIZE);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.stat_compl);
+ IPA_STATS_DEC_CNT(
+ ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
+ }
+	}
+
+ sys->free_skb(skb);
+ return rc;
+}
+
+static struct sk_buff *join_prev_skb(struct sk_buff *prev_skb,
+ struct sk_buff *skb, unsigned int len)
+{
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(prev_skb, 0,
+ len, GFP_KERNEL);
+ if (likely(skb2)) {
+ memcpy(skb_put(skb2, len),
+ skb->data, len);
+	} else {
+		IPAERR("copy expand failed\n");
+	}
+ dev_kfree_skb_any(prev_skb);
+
+ return skb2;
+}
+
+static void wan_rx_handle_splt_pyld(struct sk_buff *skb,
+ struct ipa_sys_context *sys)
+{
+ struct sk_buff *skb2;
+
+ IPADBG("rem %d skb %d\n", sys->len_rem, skb->len);
+ if (sys->len_rem <= skb->len) {
+ if (sys->prev_skb) {
+ skb2 = join_prev_skb(sys->prev_skb, skb,
+ sys->len_rem);
+ if (likely(skb2)) {
+				IPADBG(
+				"removing Status element from skb and sending to WAN client\n");
+ skb_pull(skb2, IPA_PKT_STATUS_SIZE);
+ skb2->truesize = skb2->len +
+ sizeof(struct sk_buff);
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_RECEIVE,
+ (unsigned long)(skb2));
+ }
+ }
+ skb_pull(skb, sys->len_rem);
+ sys->prev_skb = NULL;
+ sys->len_rem = 0;
+ } else {
+ if (sys->prev_skb) {
+ skb2 = join_prev_skb(sys->prev_skb, skb,
+ skb->len);
+ sys->prev_skb = skb2;
+ }
+ sys->len_rem -= skb->len;
+ skb_pull(skb, skb->len);
+ }
+}
+
+static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb,
+ struct ipa_sys_context *sys)
+{
+ int rc = 0;
+ struct ipa_hw_pkt_status *status;
+ struct sk_buff *skb2;
+ u16 pkt_len_with_pad;
+ u32 qmap_hdr;
+ int checksum_trailer_exists;
+ int frame_len;
+ int ep_idx;
+ unsigned int used = *(unsigned int *)skb->cb;
+ unsigned int used_align = ALIGN(used, 32);
+ unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+
+ IPA_DUMP_BUFF(skb->data, 0, skb->len);
+ if (skb->len == 0) {
+ IPAERR("ZLT\n");
+ goto bail;
+ }
+
+ if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_RECEIVE, (unsigned long)(skb));
+ return rc;
+ }
+ if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) {
+		IPAERR("Recycle should be enabled only with GRO aggregation\n");
+ ipa_assert();
+ }
+	/*
+	 * the payload is split across 2 or more buffers;
+	 * take the start of the payload from prev_skb
+	 */
+ if (sys->len_rem)
+ wan_rx_handle_splt_pyld(skb, sys);
+
+ while (skb->len) {
+ IPADBG("LEN_REM %d\n", skb->len);
+ if (skb->len < IPA_PKT_STATUS_SIZE) {
+ IPAERR("status straddles buffer\n");
+ WARN_ON(1);
+ goto bail;
+ }
+ status = (struct ipa_hw_pkt_status *)skb->data;
+ IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ status->status_opcode, status->endp_src_idx,
+ status->endp_dest_idx, status->pkt_len);
+
+ if (sys->status_stat) {
+ sys->status_stat->status[sys->status_stat->curr] =
+ *status;
+ sys->status_stat->curr++;
+ if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+ sys->status_stat->curr = 0;
+ }
+
+ if (status->status_opcode !=
+ IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
+ status->status_opcode !=
+ IPA_HW_STATUS_OPCODE_PACKET &&
+ status->status_opcode !=
+ IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
+ IPAERR("unsupported opcode\n");
+ skb_pull(skb, IPA_PKT_STATUS_SIZE);
+ continue;
+ }
+ IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
+ if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
+ status->endp_src_idx >= ipa_ctx->ipa_num_pipes ||
+ status->pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
+ IPAERR("status fields invalid\n");
+ WARN_ON(1);
+ goto bail;
+ }
+ if (status->pkt_len == 0) {
+ IPADBG("Skip aggr close status\n");
+ skb_pull(skb, IPA_PKT_STATUS_SIZE);
+ IPA_STATS_DEC_CNT(ipa_ctx->stats.rx_pkts);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.wan_aggr_close);
+ continue;
+ }
+ ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+ if (status->endp_dest_idx != ep_idx) {
+ IPAERR("expected endp_dest_idx %d received %d\n",
+ ep_idx, status->endp_dest_idx);
+ WARN_ON(1);
+ goto bail;
+ }
+ /* RX data */
+ if (skb->len == IPA_PKT_STATUS_SIZE) {
+ IPAERR("Ins header in next buffer\n");
+ WARN_ON(1);
+ goto bail;
+ }
+ qmap_hdr = *(u32 *)(status+1);
+		/*
+		 * Take pkt_len_with_pad from the last 2 bytes of the QMAP
+		 * header. QMAP is big-endian, so convert the pkt_len field
+		 * to host order.
+		 */
+		pkt_len_with_pad = ntohs((qmap_hdr >> 16) & 0xffff);
+ IPADBG("pkt_len with pad %d\n", pkt_len_with_pad);
+ /*get the CHECKSUM_PROCESS bit*/
+ checksum_trailer_exists = status->status_mask &
+ IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS;
+ IPADBG("checksum_trailer_exists %d\n",
+ checksum_trailer_exists);
+
+ frame_len = IPA_PKT_STATUS_SIZE +
+ IPA_QMAP_HEADER_LENGTH +
+ pkt_len_with_pad;
+ if (checksum_trailer_exists)
+ frame_len += IPA_DL_CHECKSUM_LENGTH;
+ IPADBG("frame_len %d\n", frame_len);
+
+ skb2 = skb_clone(skb, GFP_KERNEL);
+ if (likely(skb2)) {
+			/*
+			 * the length of the actual data is smaller than
+			 * expected: the payload is split across 2 buffers
+			 */
+ if (skb->len < frame_len) {
+ IPADBG("SPL skb len %d len %d\n",
+ skb->len, frame_len);
+ sys->prev_skb = skb2;
+ sys->len_rem = frame_len - skb->len;
+ skb_pull(skb, skb->len);
+ } else {
+ skb_trim(skb2, frame_len);
+ IPADBG("rx avail for %d\n",
+ status->endp_dest_idx);
+				IPADBG(
+				"removing Status element from skb and sending to WAN client\n");
+ skb_pull(skb2, IPA_PKT_STATUS_SIZE);
+ skb2->truesize = skb2->len +
+ sizeof(struct sk_buff) +
+ (ALIGN(frame_len, 32) *
+ unused / used_align);
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_RECEIVE, (unsigned long)(skb2));
+ skb_pull(skb, frame_len);
+ }
+ } else {
+ IPAERR("fail to clone\n");
+ if (skb->len < frame_len) {
+ sys->prev_skb = NULL;
+ sys->len_rem = frame_len - skb->len;
+ skb_pull(skb, skb->len);
+ } else {
+ skb_pull(skb, frame_len);
+ }
+ }
+	}
+bail:
+ sys->free_skb(skb);
+ return rc;
+}
+
+static int ipa_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys)
+{
+ struct ipa_a5_mux_hdr *mux_hdr;
+ unsigned int pull_len;
+ unsigned int padding;
+ struct ipa_ep_context *ep;
+ unsigned int src_pipe;
+
+ mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
+
+ src_pipe = mux_hdr->src_pipe_index;
+
+ IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
+ rx_skb->len, ntohs(mux_hdr->interface_id),
+ src_pipe, mux_hdr->flags, ntohl(mux_hdr->metadata));
+
+ IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
+
+ IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
+ IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts);
+
+ /*
+ * Any packets arriving over AMPDU_TX should be dispatched
+ * to the regular WLAN RX data-path.
+ */
+ if (unlikely(src_pipe == WLAN_AMPDU_TX_EP))
+ src_pipe = WLAN_PROD_TX_EP;
+
+	if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes)) {
+		IPAERR("drop pipe=%d, src pipe out of range\n", src_pipe);
+		dev_kfree_skb_any(rx_skb);
+		return 0;
+	}
+
+	/* validate the pipe before dereferencing its EP context */
+	ep = &ipa_ctx->ep[src_pipe];
+	spin_lock(&ipa_ctx->disconnect_lock);
+	if (unlikely(!ep->valid || !ep->client_notify)) {
+		IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
+		  src_pipe, ep->valid, ep->client_notify);
+		dev_kfree_skb_any(rx_skb);
+		spin_unlock(&ipa_ctx->disconnect_lock);
+		return 0;
+	}
+
+ pull_len = sizeof(struct ipa_a5_mux_hdr);
+
+ /*
+ * IP packet starts on word boundary
+ * remove the MUX header and any padding and pass the frame to
+ * the client which registered a rx callback on the "src pipe"
+ */
+ padding = ep->cfg.hdr.hdr_len & 0x3;
+ if (padding)
+ pull_len += 4 - padding;
+
+ IPADBG("pulling %d bytes from skb\n", pull_len);
+ skb_pull(rx_skb, pull_len);
+ ep->client_notify(ep->priv, IPA_RECEIVE,
+ (unsigned long)(rx_skb));
+ spin_unlock(&ipa_ctx->disconnect_lock);
+ return 0;
+}
+
+static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags)
+{
+ return __dev_alloc_skb(len, flags);
+}
+
+static struct sk_buff *ipa_get_skb_ipa_rx_headroom(unsigned int len,
+ gfp_t flags)
+{
+ struct sk_buff *skb;
+
+ skb = __dev_alloc_skb(len + IPA_HEADROOM, flags);
+ if (skb)
+ skb_reserve(skb, IPA_HEADROOM);
+
+ return skb;
+}
+
+static void ipa_free_skb_rx(struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
+{
+ struct sk_buff *rx_skb = (struct sk_buff *)data;
+ struct ipa_hw_pkt_status *status;
+ struct ipa_ep_context *ep;
+ unsigned int src_pipe;
+ u32 metadata;
+
+ status = (struct ipa_hw_pkt_status *)rx_skb->data;
+ src_pipe = status->endp_src_idx;
+ metadata = status->metadata;
+	if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes)) {
+		IPAERR("drop pipe=%d, src pipe out of range\n", src_pipe);
+		dev_kfree_skb_any(rx_skb);
+		return;
+	}
+	/* validate the pipe before dereferencing its EP context */
+	ep = &ipa_ctx->ep[src_pipe];
+	if (unlikely(!ep->valid || !ep->client_notify)) {
+		IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
+		  src_pipe, ep->valid, ep->client_notify);
+		dev_kfree_skb_any(rx_skb);
+		return;
+	}
+ if (!status->exception)
+ skb_pull(rx_skb, IPA_PKT_STATUS_SIZE +
+ IPA_LAN_RX_HEADER_LENGTH);
+ else
+ skb_pull(rx_skb, IPA_PKT_STATUS_SIZE);
+
+ /*
+ * Metadata Info
+ * ------------------------------------------
+ * | 3 | 2 | 1 | 0 |
+ * | fw_desc | vdev_id | qmap mux id | Resv |
+ * ------------------------------------------
+ */
+ *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+ IPADBG("meta_data: 0x%x cb: 0x%x\n",
+ metadata, *(u32 *)rx_skb->cb);
+
+ ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+}
+
+void ipa2_recycle_wan_skb(struct sk_buff *skb)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ int ep_idx = ipa2_get_ep_mapping(
+ IPA_CLIENT_APPS_WAN_CONS);
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
+ (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+
+ if (unlikely(ep_idx == -1)) {
+ IPAERR("dest EP does not exist\n");
+ ipa_assert();
+ }
+
+ rx_pkt = kmem_cache_zalloc(
+ ipa_ctx->rx_pkt_wrapper_cache, flag);
+ if (!rx_pkt)
+ ipa_assert();
+
+ INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+ rx_pkt->sys = ipa_ctx->ep[ep_idx].sys;
+
+ rx_pkt->data.skb = skb;
+ rx_pkt->data.dma_addr = 0;
+ ipa_skb_recycle(rx_pkt->data.skb);
+ skb_reserve(rx_pkt->data.skb, IPA_HEADROOM);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ spin_lock_bh(&rx_pkt->sys->spinlock);
+ list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
+ spin_unlock_bh(&rx_pkt->sys->spinlock);
+}
+
+static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt_expected;
+ struct sk_buff *rx_skb;
+
+ if (unlikely(list_empty(&sys->head_desc_list))) {
+ WARN_ON(1);
+ return;
+ }
+ rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa_rx_pkt_wrapper,
+ link);
+ list_del(&rx_pkt_expected->link);
+ sys->len--;
+ if (size)
+ rx_pkt_expected->len = size;
+ rx_skb = rx_pkt_expected->data.skb;
+ dma_unmap_single(ipa_ctx->pdev, rx_pkt_expected->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+ rx_skb->len = rx_pkt_expected->len;
+ *(unsigned int *)rx_skb->cb = rx_skb->len;
+ rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+ sys->pyld_hdlr(rx_skb, sys);
+ sys->repl_hdlr(sys);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected);
+}
+
+static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt_expected;
+ struct sk_buff *rx_skb;
+
+ if (unlikely(list_empty(&sys->head_desc_list))) {
+ WARN_ON(1);
+ return;
+ }
+ rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa_rx_pkt_wrapper,
+ link);
+ list_del(&rx_pkt_expected->link);
+ sys->len--;
+
+ if (size)
+ rx_pkt_expected->len = size;
+
+ rx_skb = rx_pkt_expected->data.skb;
+ skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+ rx_skb->len = rx_pkt_expected->len;
+ rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+ sys->ep->wstats.tx_pkts_rcvd++;
+ if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
+ ipa2_free_skb(&rx_pkt_expected->data);
+ sys->ep->wstats.tx_pkts_dropped++;
+ } else {
+ sys->ep->wstats.tx_pkts_sent++;
+ sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+ (unsigned long)(&rx_pkt_expected->data));
+ }
+ ipa_replenish_wlan_rx_cache(sys);
+}
+
+static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
+ struct sps_iovec *iovec)
+{
+ IPADBG("ENTER.\n");
+ if (unlikely(list_empty(&sys->head_desc_list))) {
+ IPAERR("descriptor list is empty!\n");
+ WARN_ON(1);
+ return;
+ }
+ if (!(iovec->flags & SPS_IOVEC_FLAG_EOT)) {
+ IPAERR("received unexpected event. sps flag is 0x%x\n"
+ , iovec->flags);
+ WARN_ON(1);
+ return;
+ }
+ sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+ (unsigned long)(iovec));
+ IPADBG("EXIT\n");
+}
+
+static void ipa_wq_rx_avail(struct work_struct *work)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ struct ipa_sys_context *sys;
+
+	rx_pkt = container_of(work, struct ipa_rx_pkt_wrapper, work);
+	if (unlikely(rx_pkt == NULL)) {
+		WARN_ON(1);
+		return;
+	}
+ sys = rx_pkt->sys;
+ ipa_wq_rx_common(sys, 0);
+}
+
+/**
+ * ipa_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Rx operation is complete.
+ * Called in an interrupt context.
+ * @notify: SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to a workqueue.
+ */
+void ipa_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ rx_pkt = notify->data.transfer.user;
+ if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
+ atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
+ rx_pkt->len = notify->data.transfer.iovec.size;
+ IPADBG("event %d notified sys=%p len=%u\n", notify->event_id,
+ notify->user, rx_pkt->len);
+ queue_work(rx_pkt->sys->wq, &rx_pkt->work);
+ break;
+ default:
+ IPAERR("received unexpected event id %d sys=%p\n",
+ notify->event_id, notify->user);
+ }
+}
+
+static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
+ struct ipa_sys_context *sys)
+{
+ if (sys->ep->client_notify) {
+ sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+ (unsigned long)(rx_skb));
+ } else {
+ dev_kfree_skb_any(rx_skb);
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
+static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
+ struct ipa_sys_context *sys)
+{
+	unsigned long aggr_byte_limit;
+
+ sys->ep->status.status_en = true;
+ sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
+ if (IPA_CLIENT_IS_PROD(in->client)) {
+ if (!sys->ep->skip_ep_cfg) {
+ sys->policy = IPA_POLICY_NOINTR_MODE;
+ sys->sps_option = SPS_O_AUTO_ENABLE;
+ sys->sps_callback = NULL;
+ sys->ep->status.status_ep = ipa2_get_ep_mapping(
+ IPA_CLIENT_APPS_LAN_CONS);
+ if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
+ sys->ep->status.status_en = false;
+ } else {
+ sys->policy = IPA_POLICY_INTR_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE |
+ SPS_O_EOT);
+ sys->sps_callback =
+ ipa_sps_irq_tx_no_aggr_notify;
+ }
+ return 0;
+ }
+
+ aggr_byte_limit =
+ (unsigned long int)IPA_GENERIC_RX_BUFF_SZ(
+ ipa_adjust_ra_buff_base_sz(
+ in->ipa_ep_cfg.aggr.aggr_byte_limit));
+
+ if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
+ in->client == IPA_CLIENT_APPS_WAN_CONS) {
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ replenish_rx_work_func);
+ INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+ IPA_GENERIC_RX_BUFF_BASE_SZ) -
+ IPA_HEADROOM;
+ sys->get_skb = ipa_get_skb_ipa_rx_headroom;
+ sys->free_skb = ipa_free_skb_rx;
+ in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+ in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+ in->ipa_ep_cfg.aggr.aggr_time_limit =
+ IPA_GENERIC_AGGR_TIME_LIMIT;
+ if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+ sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
+			sys->rx_pool_sz =
+				ipa_ctx->lan_rx_ring_size;
+			if (nr_cpu_ids > 1) {
+				sys->repl_hdlr =
+					ipa_fast_replenish_rx_cache;
+				sys->repl_trig_thresh =
+					sys->rx_pool_sz / 8;
+			} else {
+				sys->repl_hdlr =
+					ipa_replenish_rx_cache;
+			}
+ in->ipa_ep_cfg.aggr.aggr_byte_limit =
+ IPA_GENERIC_AGGR_BYTE_LIMIT;
+ in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+ IPA_GENERIC_AGGR_PKT_LIMIT;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_LAN_RX;
+ } else if (in->client ==
+ IPA_CLIENT_APPS_WAN_CONS) {
+ sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
+ if (in->napi_enabled) {
+ sys->repl_hdlr =
+ ipa_replenish_rx_cache_recycle;
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ } else {
+				sys->rx_pool_sz =
+					ipa_ctx->wan_rx_ring_size;
+				if (nr_cpu_ids > 1) {
+					sys->repl_hdlr =
+						ipa_fast_replenish_rx_cache;
+					sys->repl_trig_thresh =
+						sys->rx_pool_sz / 8;
+				} else {
+					sys->repl_hdlr =
+						ipa_replenish_rx_cache;
+				}
+ }
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_WAN_RX;
+			in->ipa_ep_cfg.aggr.aggr_sw_eof_active = true;
+			if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
+				IPAERR("get close-by %u\n",
+					ipa_adjust_ra_buff_base_sz(
+					in->ipa_ep_cfg.aggr.aggr_byte_limit));
+				IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit);
+				/* disable ipa_status */
+				sys->ep->status.status_en = false;
+				sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+					ipa_adjust_ra_buff_base_sz(
+					in->ipa_ep_cfg.aggr.aggr_byte_limit -
+					IPA_HEADROOM));
+				in->ipa_ep_cfg.aggr.aggr_byte_limit =
+					sys->rx_buff_sz <
+					in->ipa_ep_cfg.aggr.aggr_byte_limit ?
+					IPA_ADJUST_AGGR_BYTE_LIMIT(
+						sys->rx_buff_sz) :
+					IPA_ADJUST_AGGR_BYTE_LIMIT(
+					in->ipa_ep_cfg.aggr.aggr_byte_limit);
+				IPAERR("set aggr_limit %lu\n",
+					(unsigned long int)
+					in->ipa_ep_cfg.aggr.aggr_byte_limit);
+			} else {
+				in->ipa_ep_cfg.aggr.aggr_byte_limit =
+					IPA_GENERIC_AGGR_BYTE_LIMIT;
+				in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+					IPA_GENERIC_AGGR_PKT_LIMIT;
+			}
+ }
+ } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
+		IPADBG("assigning policy to client:%d\n",
+ in->client);
+
+ sys->ep->status.status_en = false;
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ replenish_rx_work_func);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
+ sys->rx_pool_sz = in->desc_fifo_sz /
+ sizeof(struct sps_iovec) - 1;
+ if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
+ sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
+ sys->pyld_hdlr = NULL;
+ sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
+ sys->get_skb = ipa_get_skb_ipa_rx;
+ sys->free_skb = ipa_free_skb_rx;
+ in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
+ } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
+		IPADBG("assigning policy to client:%d\n",
+ in->client);
+
+ sys->ep->status.status_en = false;
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ replenish_rx_work_func);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+ sys->rx_pool_sz = in->desc_fifo_sz /
+ sizeof(struct sps_iovec) - 1;
+ if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
+ sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
+ sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
+ sys->get_skb = ipa_get_skb_ipa_rx;
+ sys->free_skb = ipa_free_skb_rx;
+ sys->repl_hdlr = ipa_replenish_rx_cache;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_ODU_RX;
+ } else if (in->client ==
+ IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
+		IPADBG("assigning policy to client:%d\n",
+ in->client);
+ sys->ep->status.status_en = false;
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_rx_work_func);
+ } else if (in->client ==
+ IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+		IPADBG("assigning policy to client:%d\n",
+ in->client);
+ sys->ep->status.status_en = false;
+ sys->policy = IPA_POLICY_NOINTR_MODE;
+ sys->sps_option = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ } else {
+ IPAERR("Need to install a RX pipe hdlr\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ipa_assign_policy(struct ipa_sys_connect_params *in,
+ struct ipa_sys_context *sys)
+{
+ if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
+ sys->policy = IPA_POLICY_INTR_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
+ sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
+ return 0;
+ }
+
+ if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
+ if (in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) {
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
+ SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_tx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_tx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_tx_work_func);
+ atomic_set(&sys->curr_polling_state, 0);
+ } else if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
+ SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ replenish_rx_work_func);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_RX_SKB_SIZE;
+ sys->rx_pool_sz = IPA_RX_POOL_CEIL;
+ sys->pyld_hdlr = ipa_rx_pyld_hdlr;
+ sys->get_skb = ipa_get_skb_ipa_rx;
+ sys->free_skb = ipa_free_skb_rx;
+ sys->repl_hdlr = ipa_replenish_rx_cache;
+ } else if (IPA_CLIENT_IS_PROD(in->client)) {
+ sys->policy = IPA_POLICY_INTR_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
+ sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
+ } else {
+ IPAERR("Need to install a RX pipe hdlr\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+ } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
+ return ipa_assign_policy_v2(in, sys);
+
+ IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+/**
+ * ipa_tx_client_rx_notify_release() - Callback function
+ * which will call the user supplied callback function to
+ * release the skb, or release it on its own if no callback
+ * function was supplied
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notify callback is for the destination client
+ * This function is supplied in ipa_tx_dp_mul
+ */
+static void ipa_tx_client_rx_notify_release(void *user1, int user2)
+{
+ struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
+ int ep_idx = user2;
+
+ IPADBG("Received data desc anchor:%p\n", dd);
+
+ atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
+ ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+
+ /* wlan host driver waits till tx complete before unload */
+ IPADBG("ep=%d fifo_desc_free_count=%d\n",
+ ep_idx, atomic_read(&ipa_ctx->ep[ep_idx].avail_fifo_desc));
+ IPADBG("calling client notify callback with priv:%p\n",
+ ipa_ctx->ep[ep_idx].priv);
+
+ if (ipa_ctx->ep[ep_idx].client_notify) {
+ ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
+ IPA_WRITE_DONE, (unsigned long)user1);
+ ipa_ctx->ep[ep_idx].wstats.rx_hd_reply++;
+ }
+}
+/**
+ * ipa_tx_client_rx_pkt_status() - Callback function
+ * which increments the count of available fifo descriptors and
+ * the received-status statistic
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notify callback is for the destination client
+ * This function is supplied in ipa_tx_dp_mul
+ */
+static void ipa_tx_client_rx_pkt_status(void *user1, int user2)
+{
+ int ep_idx = user2;
+
+ atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
+ ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+}
+
+/**
+ * ipa2_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @data_desc: [in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received from the
+ * WLAN1_PROD pipe to IPA HW.
+ *
+ * The function sends the data descriptors from WLAN1_PROD one at
+ * a time using sps_transfer_one, setting the EOT flag on the last
+ * descriptor. Once the send is done from the SPS point of view,
+ * the IPA driver is notified via the supplied callback -
+ * ipa_sps_irq_tx_no_aggr_notify()
+ *
+ * ipa_sps_irq_tx_no_aggr_notify calls the user-supplied
+ * callback (from ipa_connect)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_tx_dp_mul(enum ipa_client_type src,
+ struct ipa_tx_data_desc *data_desc)
+{
+ /* The second byte in wlan header holds qmap id */
+#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
+ struct ipa_tx_data_desc *entry;
+ struct ipa_sys_context *sys;
+ struct ipa_desc desc = { 0 };
+ u32 num_desc, cnt;
+ int ep_idx;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ IPADBG("Received data desc anchor:%p\n", data_desc);
+
+ spin_lock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+
+ ep_idx = ipa2_get_ep_mapping(src);
+ if (unlikely(ep_idx == -1)) {
+ IPAERR("dest EP does not exist.\n");
+ goto fail_send;
+ }
+ IPADBG("ep idx:%d\n", ep_idx);
+ sys = ipa_ctx->ep[ep_idx].sys;
+
+ if (unlikely(ipa_ctx->ep[ep_idx].valid == 0)) {
+ IPAERR("dest EP not valid.\n");
+ goto fail_send;
+ }
+ sys->ep->wstats.rx_hd_rcvd++;
+
+ /* Calculate the number of descriptors */
+ num_desc = 0;
+ list_for_each_entry(entry, &data_desc->link, link) {
+ num_desc++;
+ }
+	IPADBG("Number of Data Descriptors:%d\n", num_desc);
+
+ if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
+ IPAERR("Insufficient data descriptors available\n");
+ goto fail_send;
+ }
+
+ /* Assign callback only for last data descriptor */
+ cnt = 0;
+ list_for_each_entry(entry, &data_desc->link, link) {
+ IPADBG("Parsing data desc :%d\n", cnt);
+ cnt++;
+ ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
+ (u8)sys->ep->cfg.meta.qmap_id;
+ desc.pyld = entry->pyld_buffer;
+ desc.len = entry->pyld_len;
+ desc.type = IPA_DATA_DESC_SKB;
+ desc.user1 = data_desc;
+ desc.user2 = ep_idx;
+ IPADBG("priv:%p pyld_buf:0x%p pyld_len:%d\n",
+ entry->priv, desc.pyld, desc.len);
+
+ /* In case of last descriptor populate callback */
+ if (cnt == num_desc) {
+ IPADBG("data desc:%p\n", data_desc);
+ desc.callback = ipa_tx_client_rx_notify_release;
+ } else {
+ desc.callback = ipa_tx_client_rx_pkt_status;
+ }
+
+ IPADBG("calling ipa_send_one()\n");
+ if (ipa_send_one(sys, &desc, true)) {
+ IPAERR("fail to send skb\n");
+ sys->ep->wstats.rx_pkt_leak += (cnt-1);
+ sys->ep->wstats.rx_dp_fail++;
+ goto fail_send;
+ }
+
+ if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
+ atomic_dec(&sys->ep->avail_fifo_desc);
+
+ sys->ep->wstats.rx_pkts_rcvd++;
+ IPADBG("ep=%d fifo desc=%d\n",
+ ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
+ }
+
+ sys->ep->wstats.rx_hd_processed++;
+ spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+ return 0;
+
+fail_send:
+ spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+ return -EFAULT;
+
+}
+
+void ipa2_free_skb(struct ipa_rx_data *data)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return;
+ }
+
+ spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+
+ ipa_ctx->wc_memb.total_tx_pkts_freed++;
+ rx_pkt = container_of(data, struct ipa_rx_pkt_wrapper, data);
+
+ ipa_skb_recycle(rx_pkt->data.skb);
+ (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+
+ list_add_tail(&rx_pkt->link,
+ &ipa_ctx->wc_memb.wlan_comm_desc_list);
+ ipa_ctx->wc_memb.wlan_comm_free_cnt++;
+
+ spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+}
+
+/* Functions added to support kernel tests */
+
+int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
+ unsigned long *ipa_bam_hdl,
+ u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
+{
+ struct ipa_ep_context *ep;
+ int ipa_ep_idx;
+ int result = -EINVAL;
+
+ if (sys_in == NULL || clnt_hdl == NULL) {
+ IPAERR("NULL args\n");
+ goto fail_gen;
+ }
+
+ if (ipa_bam_hdl == NULL || ipa_pipe_num == NULL) {
+ IPAERR("NULL args\n");
+ goto fail_gen;
+ }
+ if (sys_in->client >= IPA_CLIENT_MAX) {
+ IPAERR("bad parm client:%d\n", sys_in->client);
+ goto fail_gen;
+ }
+
+ ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client :%d\n", sys_in->client);
+ goto fail_gen;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+ if (ep->valid == 1) {
+ if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+ IPAERR("EP %d already allocated\n", ipa_ep_idx);
+ goto fail_and_disable_clocks;
+ } else {
+ if (ipa2_cfg_ep_hdr(ipa_ep_idx,
+ &sys_in->ipa_ep_cfg.hdr)) {
+ IPAERR("fail to configure hdr prop of EP %d\n",
+ ipa_ep_idx);
+ result = -EFAULT;
+ goto fail_and_disable_clocks;
+ }
+ if (ipa2_cfg_ep_cfg(ipa_ep_idx,
+ &sys_in->ipa_ep_cfg.cfg)) {
+ IPAERR("fail to configure cfg prop of EP %d\n",
+ ipa_ep_idx);
+ result = -EFAULT;
+ goto fail_and_disable_clocks;
+ }
+ IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
+ sys_in->client, ipa_ep_idx, ep->sys);
+ ep->client_notify = sys_in->notify;
+ ep->priv = sys_in->priv;
+ *clnt_hdl = ipa_ep_idx;
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+ return 0;
+ }
+ }
+
+ memset(ep, 0, offsetof(struct ipa_ep_context, sys));
+
+ ep->valid = 1;
+ ep->client = sys_in->client;
+ ep->client_notify = sys_in->notify;
+ ep->priv = sys_in->priv;
+ ep->keep_ipa_awake = true;
+
+ result = ipa_enable_data_path(ipa_ep_idx);
+ if (result) {
+ IPAERR("enable data path failed res=%d clnt=%d.\n",
+ result, ipa_ep_idx);
+ goto fail_gen2;
+ }
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto fail_gen2;
+ }
+ if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+ IPAERR("fail to configure status of EP.\n");
+ goto fail_gen2;
+ }
+ IPADBG("ep configuration successful\n");
+ } else {
+ IPADBG("skipping ep configuration\n");
+ }
+
+ *clnt_hdl = ipa_ep_idx;
+
+ *ipa_pipe_num = ipa_ep_idx;
+ *ipa_bam_hdl = ipa_ctx->bam_handle;
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+ ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+ IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+ ipa_ep_idx, ep->sys);
+
+ return 0;
+
+fail_gen2:
+fail_and_disable_clocks:
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+ return result;
+}
+
+int ipa2_sys_teardown(u32 clnt_hdl)
+{
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_disable_data_path(clnt_hdl);
+ ep->valid = 0;
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+ return 0;
+}
+
+int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+ unsigned long gsi_ev_hdl)
+{
+	IPAERR("GSI not supported in IPAv2\n");
+ return -EFAULT;
+}
+
+/**
+ * ipa_adjust_ra_buff_base_sz() - compute the RA buffer base size
+ * @aggr_byte_limit: aggregation byte limit
+ *
+ * Return value: the largest power of two which is smaller than the
+ * input value adjusted by the MTU and the generic Rx buffer limit
+ */
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
+{
+ aggr_byte_limit += IPA_MTU;
+ aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
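+	/*
+	 * Round up to the next power of two (decrement, smear the top
+	 * set bit into all lower bits, increment), then halve: e.g. an
+	 * adjusted limit of 5000 becomes 8192 and 4096 is returned.
+	 */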
+ aggr_byte_limit--;
+ aggr_byte_limit |= aggr_byte_limit >> 1;
+ aggr_byte_limit |= aggr_byte_limit >> 2;
+ aggr_byte_limit |= aggr_byte_limit >> 4;
+ aggr_byte_limit |= aggr_byte_limit >> 8;
+ aggr_byte_limit |= aggr_byte_limit >> 16;
+ aggr_byte_limit++;
+ return aggr_byte_limit >> 1;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
new file mode 100644
index 0000000..12eaae8
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -0,0 +1,1473 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+#define IPA_FLT_TABLE_WORD_SIZE (4)
+#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT (0x3)
+#define IPA_FLT_BIT_MASK (0x1)
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
+#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)
+
+static int ipa_generate_hw_rule_from_eq(
+ const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
+{
+ int num_offset_meq_32 = attrib->num_offset_meq_32;
+ int num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
+ int num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
+ int num_offset_meq_128 = attrib->num_offset_meq_128;
+ int i;
+
+ if (attrib->tos_eq_present) {
+ *buf = ipa_write_8(attrib->tos_eq, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->protocol_eq_present) {
+ *buf = ipa_write_8(attrib->protocol_eq, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (num_offset_meq_32) {
+ *buf = ipa_write_8(attrib->offset_meq_32[0].offset, *buf);
+ *buf = ipa_write_32(attrib->offset_meq_32[0].mask, *buf);
+ *buf = ipa_write_32(attrib->offset_meq_32[0].value, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ num_offset_meq_32--;
+ }
+
+ if (num_offset_meq_32) {
+ *buf = ipa_write_8(attrib->offset_meq_32[1].offset, *buf);
+ *buf = ipa_write_32(attrib->offset_meq_32[1].mask, *buf);
+ *buf = ipa_write_32(attrib->offset_meq_32[1].value, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ num_offset_meq_32--;
+ }
+
+ if (num_ihl_offset_range_16) {
+ *buf = ipa_write_8(attrib->ihl_offset_range_16[0].offset, *buf);
+ *buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_high,
+ *buf);
+ *buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_low,
+ *buf);
+ *buf = ipa_pad_to_32(*buf);
+ num_ihl_offset_range_16--;
+ }
+
+ if (num_ihl_offset_range_16) {
+ *buf = ipa_write_8(attrib->ihl_offset_range_16[1].offset, *buf);
+ *buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_high,
+ *buf);
+ *buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_low,
+ *buf);
+ *buf = ipa_pad_to_32(*buf);
+ num_ihl_offset_range_16--;
+ }
+
+ if (attrib->ihl_offset_eq_16_present) {
+ *buf = ipa_write_8(attrib->ihl_offset_eq_16.offset, *buf);
+ *buf = ipa_write_16(attrib->ihl_offset_eq_16.value, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->ihl_offset_eq_32_present) {
+ *buf = ipa_write_8(attrib->ihl_offset_eq_32.offset, *buf);
+ *buf = ipa_write_32(attrib->ihl_offset_eq_32.value, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (num_ihl_offset_meq_32) {
+ *buf = ipa_write_8(attrib->ihl_offset_meq_32[0].offset, *buf);
+ *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, *buf);
+ *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].value, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ num_ihl_offset_meq_32--;
+ }
+
+ /* TODO check layout of 16 byte mask and value */
+ if (num_offset_meq_128) {
+ *buf = ipa_write_8(attrib->offset_meq_128[0].offset, *buf);
+ for (i = 0; i < 16; i++)
+ *buf = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+ *buf);
+ for (i = 0; i < 16; i++)
+ *buf = ipa_write_8(attrib->offset_meq_128[0].value[i],
+ *buf);
+ *buf = ipa_pad_to_32(*buf);
+ num_offset_meq_128--;
+ }
+
+ if (num_offset_meq_128) {
+ *buf = ipa_write_8(attrib->offset_meq_128[1].offset, *buf);
+ for (i = 0; i < 16; i++)
+ *buf = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+ *buf);
+ for (i = 0; i < 16; i++)
+ *buf = ipa_write_8(attrib->offset_meq_128[1].value[i],
+ *buf);
+ *buf = ipa_pad_to_32(*buf);
+ num_offset_meq_128--;
+ }
+
+ if (attrib->tc_eq_present) {
+ *buf = ipa_write_8(attrib->tc_eq, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->fl_eq_present) {
+ *buf = ipa_write_32(attrib->fl_eq, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (num_ihl_offset_meq_32) {
+ *buf = ipa_write_8(attrib->ihl_offset_meq_32[1].offset, *buf);
+ *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, *buf);
+ *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].value, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ num_ihl_offset_meq_32--;
+ }
+
+ if (attrib->metadata_meq32_present) {
+ *buf = ipa_write_8(attrib->metadata_meq32.offset, *buf);
+ *buf = ipa_write_32(attrib->metadata_meq32.mask, *buf);
+ *buf = ipa_write_32(attrib->metadata_meq32.value, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->ipv4_frag_eq_present)
+ *buf = ipa_pad_to_32(*buf);
+
+ return 0;
+}
+
+/**
+ * ipa_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer. buf == NULL means the caller wants to know
+ *	the size of the rule as seen by HW, so no valid buffer was
+ *	passed and a scratch buffer is used instead.
+ *	With this scheme the rule is generated twice: once to learn
+ *	its size using the scratch buffer, and a second time to write
+ *	the rule into the caller-supplied buffer of the required size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip,
+ struct ipa_flt_entry *entry, u8 *buf)
+{
+ struct ipa_flt_rule_hw_hdr *hdr;
+ const struct ipa_flt_rule *rule =
+ (const struct ipa_flt_rule *)&entry->rule;
+ u16 en_rule = 0;
+ u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
+ u8 *start;
+
+ if (buf == NULL) {
+ memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+ buf = (u8 *)tmp;
+ }
+
+ start = buf;
+ hdr = (struct ipa_flt_rule_hw_hdr *)buf;
+ hdr->u.hdr.action = entry->rule.action;
+ hdr->u.hdr.retain_hdr = entry->rule.retain_hdr;
+ hdr->u.hdr.to_uc = entry->rule.to_uc;
+ if (entry->rt_tbl)
+ hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx;
+ else
+ hdr->u.hdr.rt_tbl_idx = entry->rule.rt_tbl_idx;
+ hdr->u.hdr.rsvd = 0;
+ buf += sizeof(struct ipa_flt_rule_hw_hdr);
+
+ if (rule->eq_attrib_type) {
+ if (ipa_generate_hw_rule_from_eq(&rule->eq_attrib, &buf)) {
+ IPAERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+ en_rule = rule->eq_attrib.rule_eq_bitmap;
+ } else {
+ if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+ IPAERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+ }
+
+ IPADBG("en_rule 0x%x, action=%d, rt_idx=%d, uc=%d, retain_hdr=%d\n",
+ en_rule,
+ hdr->u.hdr.action,
+ hdr->u.hdr.rt_tbl_idx,
+ hdr->u.hdr.to_uc,
+ hdr->u.hdr.retain_hdr);
+
+ hdr->u.hdr.en_rule = en_rule;
+ ipa_write_32(hdr->u.word, (u8 *)hdr);
+
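+	/* the first pass records the rule size; later passes must match */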
+ if (entry->hw_len == 0) {
+ entry->hw_len = buf - start;
+ } else if (entry->hw_len != (buf - start)) {
+ IPAERR("hw_len differs b/w passes passed=%x calc=%td\n",
+ entry->hw_len, (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table
+ * @ip: the ip address family type
+ * @hdr_sz: [out] size of the table header block
+ *
+ * Returns: the total table size on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ u32 total_sz = 0;
+ u32 rule_set_sz;
+ int i;
+
+ *hdr_sz = 0;
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ rule_set_sz = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to find HW FLT rule size\n");
+ return -EPERM;
+ }
+ IPADBG("glob ip %d len %d\n", ip, entry->hw_len);
+ rule_set_sz += entry->hw_len;
+ }
+
+ if (rule_set_sz) {
+ tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+ /* this rule-set uses a word in header block */
+ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+ if (!tbl->in_sys) {
+ /* add the terminator */
+ total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE);
+ total_sz = (total_sz +
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ rule_set_sz = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to find HW FLT rule size\n");
+ return -EPERM;
+ }
+ IPADBG("pipe %d len %d\n", i, entry->hw_len);
+ rule_set_sz += entry->hw_len;
+ }
+
+ if (rule_set_sz) {
+ tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+ /* this rule-set uses a word in header block */
+ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+ if (!tbl->in_sys) {
+ /* add the terminator */
+ total_sz += (rule_set_sz +
+ IPA_FLT_TABLE_WORD_SIZE);
+ total_sz = (total_sz +
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+ }
+
+ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+ total_sz += *hdr_sz;
+ IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+ return total_sz;
+}
+
+static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base,
+ u8 *hdr, u32 body_start_offset, u8 *hdr2, u32 *hdr_top)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ int i;
+ u32 offset;
+ u8 *body;
+ struct ipa_mem_buffer flt_tbl_mem;
+ u8 *ftbl_membody;
+
+ *hdr_top = 0;
+ body = base;
+
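+/*
+ * Write the per-pipe word of the filter table header: pipes 0-5 live
+ * in words 1-6 of the first header block, pipes 11-19 in the second
+ * header block, and pipes 6-10 are not written by the apps processor.
+ */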
+#define IPA_WRITE_FLT_HDR(idx, val) { \
+ if (idx <= 5) { \
+ *((u32 *)hdr + 1 + idx) = val; \
+ } else if (idx >= 6 && idx <= 10) { \
+ WARN_ON(1); \
+ } else if (idx >= 11 && idx <= 19) { \
+ *((u32 *)hdr2 + idx - 11) = val; \
+ } else { \
+ WARN_ON(1); \
+ } \
+}
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+
+ if (!list_empty(&tbl->head_flt_rule_list)) {
+ *hdr_top |= IPA_FLT_BIT_MASK;
+
+ if (!tbl->in_sys) {
+ offset = body - base + body_start_offset;
+ if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("offset is not word multiple %d\n",
+ offset);
+ goto proc_err;
+ }
+
+ offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_FLT_BIT_MASK;
+
+ if (hdr2)
+ *(u32 *)hdr = offset;
+ else
+ hdr = ipa_write_32(offset, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry, body)) {
+ IPAERR("failed to gen HW FLT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((long)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_FLT_TABLE_WORD_SIZE -
+ ((long)body &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the flt tbl */
+ flt_tbl_mem.size = tbl->sz;
+ flt_tbl_mem.base =
+ dma_alloc_coherent(ipa_ctx->pdev, flt_tbl_mem.size,
+ &flt_tbl_mem.phys_base, GFP_KERNEL);
+ if (!flt_tbl_mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n",
+ flt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(flt_tbl_mem.phys_base &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+ ftbl_membody = flt_tbl_mem.base;
+ memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+
+ if (hdr2)
+ *(u32 *)hdr = flt_tbl_mem.phys_base;
+ else
+ hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry,
+ ftbl_membody)) {
+ IPAERR("failed to gen HW FLT rule\n");
+ WARN_ON(1);
+ }
+ ftbl_membody += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ ftbl_membody = ipa_write_32(0, ftbl_membody);
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = flt_tbl_mem;
+ }
+ }
+
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ if (!list_empty(&tbl->head_flt_rule_list)) {
+ /* pipe "i" is at bit "i+1" */
+ *hdr_top |= (1 << (i + 1));
+
+ if (!tbl->in_sys) {
+ offset = body - base + body_start_offset;
+ if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("ofst is not word multiple %d\n",
+ offset);
+ goto proc_err;
+ }
+ offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_FLT_BIT_MASK;
+
+ if (hdr2)
+ IPA_WRITE_FLT_HDR(i, offset)
+ else
+ hdr = ipa_write_32(offset, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry,
+ &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry,
+ body)) {
+ IPAERR("fail gen FLT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((long)body &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_FLT_TABLE_WORD_SIZE -
+ ((long)body &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the flt tbl */
+ flt_tbl_mem.size = tbl->sz;
+ flt_tbl_mem.base =
+ dma_alloc_coherent(ipa_ctx->pdev,
+ flt_tbl_mem.size,
+ &flt_tbl_mem.phys_base,
+ GFP_KERNEL);
+ if (!flt_tbl_mem.base) {
+ IPAERR("fail alloc DMA buff size %d\n",
+ flt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(flt_tbl_mem.phys_base &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+
+ ftbl_membody = flt_tbl_mem.base;
+ memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+
+ if (hdr2)
+ IPA_WRITE_FLT_HDR(i,
+ flt_tbl_mem.phys_base)
+ else
+ hdr = ipa_write_32(
+ flt_tbl_mem.phys_base, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry,
+ &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry,
+ ftbl_membody)) {
+ IPAERR("fail gen FLT rule\n");
+ WARN_ON(1);
+ }
+ ftbl_membody += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ ftbl_membody =
+ ipa_write_32(0, ftbl_membody);
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = flt_tbl_mem;
+ }
+ }
+ }
+
+ return 0;
+
+proc_err:
+ return -EPERM;
+}
+
+/**
+ * ipa_generate_flt_hw_tbl_v1_1() - generates the filtering hardware table
+ * @ip: [in] the ip address family type
+ * @mem: [out] buffer to put the filtering table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa_generate_flt_hw_tbl_v1_1(enum ipa_ip_type ip,
+ struct ipa_mem_buffer *mem)
+{
+ u32 hdr_top = 0;
+ u32 hdr_sz;
+ u8 *hdr;
+ u8 *body;
+ u8 *base;
+
+ mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+ mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
+
+ if (mem->size == 0) {
+ IPAERR("flt tbl empty ip=%d\n", ip);
+ goto error;
+ }
+ mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ goto error;
+ }
+
+ memset(mem->base, 0, mem->size);
+
+ /* build the flt tbl in the DMA buffer to submit to IPA HW */
+ base = hdr = (u8 *)mem->base;
+ body = base + hdr_sz;
+
+ /* write a dummy header to move cursor */
+ hdr = ipa_write_32(hdr_top, hdr);
+
+ if (ipa_generate_flt_hw_tbl_common(ip, body, hdr, hdr_sz, 0,
+ &hdr_top)) {
+ IPAERR("fail to generate FLT HW table\n");
+ goto proc_err;
+ }
+
+ /* now write the hdr_top */
+ ipa_write_32(hdr_top, base);
+
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ return 0;
+
+proc_err:
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
+error:
+ return -EPERM;
+}
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
+{
+ struct ipa_flt_tbl *tbl;
+ int i;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping glob flt tbl (prev) ip=%d\n", ip);
+ dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
+ tbl->prev_mem.base, tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+
+ if (list_empty(&tbl->head_flt_rule_list)) {
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping glob flt tbl (curr) ip=%d\n", ip);
+ dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem));
+ }
+ }
+
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping flt tbl (prev) pipe=%d ip=%d\n", i, ip);
+ dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
+ tbl->prev_mem.base,
+ tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+
+ if (list_empty(&tbl->head_flt_rule_list)) {
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping flt tbl (curr) pipe=%d ip=%d\n",
+ i, ip);
+ dma_free_coherent(ipa_ctx->pdev,
+ tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ memset(&tbl->curr_mem, 0,
+ sizeof(tbl->curr_mem));
+ }
+ }
+ }
+}
+
+int __ipa_commit_flt_v1_1(enum ipa_ip_type ip)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ void *cmd;
+ struct ipa_ip_v4_filter_init *v4;
+ struct ipa_ip_v6_filter_init *v6;
+ u16 avail;
+ u16 size;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+ if (ip == IPA_IP_v4) {
+ avail = ipa_ctx->ip4_flt_tbl_lcl ? IPA_MEM_v1_RAM_V4_FLT_SIZE :
+ IPA_MEM_PART(v4_flt_size_ddr);
+ size = sizeof(struct ipa_ip_v4_filter_init);
+ } else {
+ avail = ipa_ctx->ip6_flt_tbl_lcl ? IPA_MEM_v1_RAM_V6_FLT_SIZE :
+ IPA_MEM_PART(v6_flt_size_ddr);
+ size = sizeof(struct ipa_ip_v6_filter_init);
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_flt_hw_tbl_v1_1(ip, mem)) {
+ IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (mem->size > avail) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+ goto fail_send_cmd;
+ }
+
+ if (ip == IPA_IP_v4) {
+ v4 = (struct ipa_ip_v4_filter_init *)cmd;
+ desc.opcode = IPA_IP_V4_FILTER_INIT;
+ v4->ipv4_rules_addr = mem->phys_base;
+ v4->size_ipv4_rules = mem->size;
+ v4->ipv4_addr = IPA_MEM_v1_RAM_V4_FLT_OFST;
+ } else {
+ v6 = (struct ipa_ip_v6_filter_init *)cmd;
+ desc.opcode = IPA_IP_V6_FILTER_INIT;
+ v6->ipv6_rules_addr = mem->phys_base;
+ v6->size_ipv6_rules = mem->size;
+ v6->ipv6_addr = IPA_MEM_v1_RAM_V6_FLT_OFST;
+ }
+
+ desc.pyld = cmd;
+ desc.len = size;
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ __ipa_reap_sys_flt_tbls(ip);
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->phys_base)
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+ mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+
+ return -EPERM;
+}
+
+static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,
+ struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head1,
+ struct ipa_mem_buffer *head2)
+{
+ int i;
+ u32 hdr_sz;
+ int num_words;
+ u32 *entr;
+ u32 body_start_offset;
+ u32 hdr_top;
+
+ if (ip == IPA_IP_v4)
+ body_start_offset = IPA_MEM_PART(apps_v4_flt_ofst) -
+ IPA_MEM_PART(v4_flt_ofst);
+ else
+ body_start_offset = IPA_MEM_PART(apps_v6_flt_ofst) -
+ IPA_MEM_PART(v6_flt_ofst);
+
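+	/* head1: one word for the global table plus one per pipe 0-5 */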
+ num_words = 7;
+ head1->size = num_words * 4;
+ head1->base = dma_alloc_coherent(ipa_ctx->pdev, head1->size,
+ &head1->phys_base, GFP_KERNEL);
+ if (!head1->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", head1->size);
+ goto err;
+ }
+ entr = (u32 *)head1->base;
+ for (i = 0; i < num_words; i++) {
+ *entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
+ entr++;
+ }
+
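+	/* head2: one word per pipe 11-19 */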
+ num_words = 9;
+ head2->size = num_words * 4;
+ head2->base = dma_alloc_coherent(ipa_ctx->pdev, head2->size,
+ &head2->phys_base, GFP_KERNEL);
+ if (!head2->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", head2->size);
+ goto head_err;
+ }
+ entr = (u32 *)head2->base;
+ for (i = 0; i < num_words; i++) {
+ *entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
+ entr++;
+ }
+
+ mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+ mem->size -= hdr_sz;
+ mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
+
+ if (mem->size) {
+ mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n",
+ mem->size);
+ goto body_err;
+ }
+ memset(mem->base, 0, mem->size);
+ }
+
+ if (ipa_generate_flt_hw_tbl_common(ip, mem->base, head1->base,
+ body_start_offset, head2->base, &hdr_top)) {
+ IPAERR("fail to generate FLT HW table\n");
+ goto proc_err;
+ }
+
+ IPADBG("HEAD1\n");
+ IPA_DUMP_BUFF(head1->base, head1->phys_base, head1->size);
+ IPADBG("HEAD2\n");
+ IPA_DUMP_BUFF(head2->base, head2->phys_base, head2->size);
+ if (mem->size) {
+ IPADBG("BODY\n");
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+ }
+
+ return 0;
+
+proc_err:
+ if (mem->size)
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+ mem->phys_base);
+body_err:
+ dma_free_coherent(ipa_ctx->pdev, head2->size, head2->base,
+ head2->phys_base);
+head_err:
+ dma_free_coherent(ipa_ctx->pdev, head1->size, head1->base,
+ head1->phys_base);
+err:
+ return -EPERM;
+}
+
+int __ipa_commit_flt_v2(enum ipa_ip_type ip)
+{
+ struct ipa_desc *desc;
+ struct ipa_hw_imm_cmd_dma_shared_mem *cmd;
+ struct ipa_mem_buffer body;
+ struct ipa_mem_buffer head1;
+ struct ipa_mem_buffer head2;
+ int rc = 0;
+ u32 local_addrb;
+ u32 local_addrh;
+ bool lcl;
+ int num_desc = 0;
+ int i;
+ u16 avail;
+
+ desc = kzalloc(16 * sizeof(*desc), GFP_ATOMIC);
+ if (desc == NULL) {
+ IPAERR("fail to alloc desc blob ip %d\n", ip);
+ rc = -ENOMEM;
+ goto fail_desc;
+ }
+
+ cmd = kzalloc(16 * sizeof(*cmd), GFP_ATOMIC);
+ if (cmd == NULL) {
+ IPAERR("fail to alloc cmd blob ip %d\n", ip);
+ rc = -ENOMEM;
+ goto fail_imm;
+ }
+
+ if (ip == IPA_IP_v4) {
+ avail = ipa_ctx->ip4_flt_tbl_lcl ?
+ IPA_MEM_PART(apps_v4_flt_size) :
+ IPA_MEM_PART(v4_flt_size_ddr);
+ local_addrh = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_flt_ofst) + 4;
+ local_addrb = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v4_flt_ofst);
+ lcl = ipa_ctx->ip4_flt_tbl_lcl;
+ } else {
+ avail = ipa_ctx->ip6_flt_tbl_lcl ?
+ IPA_MEM_PART(apps_v6_flt_size) :
+ IPA_MEM_PART(v6_flt_size_ddr);
+ local_addrh = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_flt_ofst) + 4;
+ local_addrb = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v6_flt_ofst);
+ lcl = ipa_ctx->ip6_flt_tbl_lcl;
+ }
+
+ if (ipa_generate_flt_hw_tbl_v2(ip, &body, &head1, &head2)) {
+ IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
+ rc = -EFAULT;
+ goto fail_gen;
+ }
+
+ if (body.size > avail) {
+ IPAERR("tbl too big, needed %d avail %d\n", body.size, avail);
+ goto fail_send_cmd;
+ }
+
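+	/*
+	 * DMA the header words into IPA shared memory: the global table
+	 * word goes to word 1, pipes 0-5 to words 2-7 and pipes 11-19
+	 * (from head2) to words 13-21; words 8-12 are not written by
+	 * the apps processor.
+	 */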
+ cmd[num_desc].size = 4;
+ cmd[num_desc].system_addr = head1.phys_base;
+ cmd[num_desc].local_addr = local_addrh;
+
+ desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
+ desc[num_desc].pyld = &cmd[num_desc];
+ desc[num_desc].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ desc[num_desc++].type = IPA_IMM_CMD_DESC;
+
+ for (i = 0; i < 6; i++) {
+ if (ipa_ctx->skip_ep_cfg_shadow[i]) {
+ IPADBG("skip %d\n", i);
+ continue;
+ }
+
+ if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS) == i ||
+ ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) == i ||
+ ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD) == i ||
+ (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == i
+ && ipa_ctx->modem_cfg_emb_pipe_flt)) {
+ IPADBG("skip %d\n", i);
+ continue;
+ }
+
+ if (ip == IPA_IP_v4) {
+ local_addrh = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_flt_ofst) +
+ 8 + i * 4;
+ } else {
+ local_addrh = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_flt_ofst) +
+ 8 + i * 4;
+ }
+ cmd[num_desc].size = 4;
+ cmd[num_desc].system_addr = head1.phys_base + 4 + i * 4;
+ cmd[num_desc].local_addr = local_addrh;
+
+ desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
+ desc[num_desc].pyld = &cmd[num_desc];
+ desc[num_desc].len =
+ sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ desc[num_desc++].type = IPA_IMM_CMD_DESC;
+ }
+
+ for (i = 11; i < ipa_ctx->ipa_num_pipes; i++) {
+ if (ipa_ctx->skip_ep_cfg_shadow[i]) {
+ IPADBG("skip %d\n", i);
+ continue;
+ }
+ if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == i &&
+ ipa_ctx->modem_cfg_emb_pipe_flt) {
+ IPADBG("skip %d\n", i);
+ continue;
+ }
+ if (ip == IPA_IP_v4) {
+ local_addrh = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_flt_ofst) +
+ 13 * 4 + (i - 11) * 4;
+ } else {
+ local_addrh = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_flt_ofst) +
+ 13 * 4 + (i - 11) * 4;
+ }
+ cmd[num_desc].size = 4;
+ cmd[num_desc].system_addr = head2.phys_base + (i - 11) * 4;
+ cmd[num_desc].local_addr = local_addrh;
+
+ desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
+ desc[num_desc].pyld = &cmd[num_desc];
+ desc[num_desc].len =
+ sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ desc[num_desc++].type = IPA_IMM_CMD_DESC;
+ }
+
+ if (lcl) {
+ cmd[num_desc].size = body.size;
+ cmd[num_desc].system_addr = body.phys_base;
+ cmd[num_desc].local_addr = local_addrb;
+
+ desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
+ desc[num_desc].pyld = &cmd[num_desc];
+ desc[num_desc].len =
+ sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ desc[num_desc++].type = IPA_IMM_CMD_DESC;
+
+ if (ipa_send_cmd(num_desc, desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ goto fail_send_cmd;
+ }
+ } else {
+ if (ipa_send_cmd(num_desc, desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ goto fail_send_cmd;
+ }
+ }
+
+ __ipa_reap_sys_flt_tbls(ip);
+
+fail_send_cmd:
+ if (body.size)
+ dma_free_coherent(ipa_ctx->pdev, body.size, body.base,
+ body.phys_base);
+ dma_free_coherent(ipa_ctx->pdev, head1.size, head1.base,
+ head1.phys_base);
+ dma_free_coherent(ipa_ctx->pdev, head2.size, head2.base,
+ head2.phys_base);
+fail_gen:
+ kfree(cmd);
+fail_imm:
+ kfree(desc);
+fail_desc:
+ return rc;
+}
+
+static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
+ const struct ipa_flt_rule *rule, u8 add_rear,
+ u32 *rule_hdl)
+{
+ struct ipa_flt_entry *entry;
+ struct ipa_rt_tbl *rt_tbl = NULL;
+ int id;
+
+ if (rule->action != IPA_PASS_TO_EXCEPTION) {
+ if (!rule->eq_attrib_type) {
+ if (!rule->rt_tbl_hdl) {
+ IPAERR("invalid RT tbl\n");
+ goto error;
+ }
+
+ rt_tbl = ipa_id_find(rule->rt_tbl_hdl);
+ if (rt_tbl == NULL) {
+ IPAERR("RT tbl not found\n");
+ goto error;
+ }
+
+ if (rt_tbl->cookie != IPA_COOKIE) {
+ IPAERR("RT table cookie is invalid\n");
+ goto error;
+ }
+ } else {
+ if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
+ IPA_MEM_PART(v4_modem_rt_index_hi) :
+ IPA_MEM_PART(v6_modem_rt_index_hi))) {
+ IPAERR("invalid RT tbl\n");
+ goto error;
+ }
+ }
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc FLT rule object\n");
+ goto error;
+ }
+ INIT_LIST_HEAD(&entry->link);
+ entry->rule = *rule;
+ entry->cookie = IPA_COOKIE;
+ entry->rt_tbl = rt_tbl;
+ entry->tbl = tbl;
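+	/* with a sticky rear, new rear additions go before the last entry */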
+ if (add_rear) {
+ if (tbl->sticky_rear)
+ list_add_tail(&entry->link,
+ tbl->head_flt_rule_list.prev);
+ else
+ list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+ } else {
+ list_add(&entry->link, &tbl->head_flt_rule_list);
+ }
+ tbl->rule_cnt++;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt++;
+ id = ipa_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+ *rule_hdl = id;
+ entry->id = id;
+ IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+ return 0;
+
+error:
+ return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+ struct ipa_flt_entry *entry;
+ int id;
+
+ entry = ipa_id_find(rule_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ return -EINVAL;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("bad params\n");
+ return -EINVAL;
+ }
+ id = entry->id;
+
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ IPADBG("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt);
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+
+ return 0;
+}
+
+static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
+ enum ipa_ip_type ip)
+{
+ struct ipa_flt_entry *entry;
+ struct ipa_rt_tbl *rt_tbl = NULL;
+
+ entry = ipa_id_find(frule->rule_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ goto error;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("bad params\n");
+ goto error;
+ }
+
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+
+ if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
+ if (!frule->rule.eq_attrib_type) {
+ if (!frule->rule.rt_tbl_hdl) {
+ IPAERR("invalid RT tbl\n");
+ goto error;
+ }
+
+ rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl);
+ if (rt_tbl == NULL) {
+ IPAERR("RT tbl not found\n");
+ goto error;
+ }
+
+ if (rt_tbl->cookie != IPA_COOKIE) {
+ IPAERR("RT table cookie is invalid\n");
+ goto error;
+ }
+ } else {
+ if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
+ IPA_MEM_PART(v4_modem_rt_index_hi) :
+ IPA_MEM_PART(v6_modem_rt_index_hi))) {
+ IPAERR("invalid RT tbl\n");
+ goto error;
+ }
+ }
+ }
+
+ entry->rule = frule->rule;
+ entry->rt_tbl = rt_tbl;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt++;
+ entry->hw_len = 0;
+
+ return 0;
+
+error:
+ return -EPERM;
+}
+
+static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
+ const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl)
+{
+ struct ipa_flt_tbl *tbl;
+
+ if (rule == NULL || rule_hdl == NULL) {
+ IPAERR("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
+
+ return -EINVAL;
+ }
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ IPADBG("add global flt rule ip=%d\n", ip);
+
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+ const struct ipa_flt_rule *rule, u8 add_rear,
+ u32 *rule_hdl)
+{
+ struct ipa_flt_tbl *tbl;
+ int ipa_ep_idx;
+
+ if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
+ IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ rule_hdl, ep);
+
+ return -EINVAL;
+ }
+ ipa_ep_idx = ipa2_get_ep_mapping(ep);
+ if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
+ IPAERR("ep not valid ep=%d\n", ep);
+ return -EINVAL;
+ }
+ if (ipa_ctx->ep[ipa_ep_idx].valid == 0)
+ IPADBG("ep not connected ep_idx=%d\n", ipa_ep_idx);
+
+ tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
+ IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa2_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+ int i;
+ int result;
+
+ if (rules == NULL || rules->num_rules == 0 ||
+ rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (rules->global)
+ result = __ipa_add_global_flt_rule(rules->ip,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].flt_rule_hdl);
+ else
+ result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].flt_rule_hdl);
+ if (result) {
+ IPAERR("failed to add flt rule %d\n", i);
+ rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (ipa_ctx->ctrl->ipa_commit_flt(rules->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa2_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of filtering rule handles to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+ int i;
+ int result;
+
+ if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del flt rule %i\n", i);
+ hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa2_mdfy_flt_rule() - Modify the specified filtering rules in SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of filtering rules to modify
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
+{
+ int i;
+ int result;
+
+ if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_rules; i++) {
+ if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
+			IPAERR("failed to mdfy flt rule %i\n", i);
+ hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
+ } else {
+ hdls->rules[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa2_commit_flt() - Commit the current SW filtering table of specified type
+ * to IPA HW
+ * @ip: [in] the family of filtering tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_commit_flt(enum ipa_ip_type ip)
+{
+ int result;
+
+ if (ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+
+ if (ipa_ctx->ctrl->ipa_commit_flt(ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa2_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip: [in] the family of filtering tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_reset_flt(enum ipa_ip_type ip)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ struct ipa_flt_entry *next;
+ int i;
+ int id;
+
+ if (ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset flt ip=%d\n", ip);
+ list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) {
+ if (ipa_id_find(entry->id) == NULL) {
+ WARN_ON(1);
+ mutex_unlock(&ipa_ctx->lock);
+ return -EFAULT;
+ }
+
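+		/* preserve rules that match the invalid L4 protocol marker */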
+ if ((ip == IPA_IP_v4 &&
+ entry->rule.attrib.attrib_mask == IPA_FLT_PROTOCOL &&
+ entry->rule.attrib.u.v4.protocol ==
+ IPA_INVALID_L4_PROTOCOL) ||
+ (ip == IPA_IP_v6 &&
+ entry->rule.attrib.attrib_mask == IPA_FLT_NEXT_HDR &&
+ entry->rule.attrib.u.v6.next_hdr ==
+ IPA_INVALID_L4_PROTOCOL))
+ continue;
+
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ entry->cookie = 0;
+ id = entry->id;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+ }
+
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_id_find(entry->id) == NULL) {
+ WARN_ON(1);
+ mutex_unlock(&ipa_ctx->lock);
+ return -EFAULT;
+ }
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ entry->cookie = 0;
+ id = entry->id;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+
+void ipa_install_dflt_flt_rules(u32 ipa_ep_idx)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
+ struct ipa_flt_rule rule;
+
+ memset(&rule, 0, sizeof(rule));
+
+ mutex_lock(&ipa_ctx->lock);
+ tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
+ tbl->sticky_rear = true;
+ rule.action = IPA_PASS_TO_EXCEPTION;
+ __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, false,
+ &ep->dflt_flt4_rule_hdl);
+ ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+
+ tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
+ tbl->sticky_rear = true;
+ rule.action = IPA_PASS_TO_EXCEPTION;
+ __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, false,
+ &ep->dflt_flt6_rule_hdl);
+ ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+ mutex_unlock(&ipa_ctx->lock);
+}
+
+void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx)
+{
+ struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ mutex_lock(&ipa_ctx->lock);
+ if (ep->dflt_flt4_rule_hdl) {
+ __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
+ ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+ ep->dflt_flt4_rule_hdl = 0;
+ }
+ if (ep->dflt_flt6_rule_hdl) {
+ __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
+ ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+ ep->dflt_flt6_rule_hdl = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
new file mode 100644
index 0000000..ab14cb7
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -0,0 +1,1369 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
+static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
+
+#define HDR_TYPE_IS_VALID(type) \
+ ((type) >= 0 && (type) < IPA_HDR_L2_MAX)
+
+#define HDR_PROC_TYPE_IS_VALID(type) \
+ ((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
+
+/* uCP command numbers */
+#define IPA_HDR_UCP_802_3_TO_802_3 6
+#define IPA_HDR_UCP_802_3_TO_ETHII 7
+#define IPA_HDR_UCP_ETHII_TO_802_3 8
+#define IPA_HDR_UCP_ETHII_TO_ETHII 9
+
+/**
+ * ipa_generate_hdr_hw_tbl() - generates the headers table
+ * @mem: [out] buffer to put the header table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+ struct ipa_hdr_entry *entry;
+
+ mem->size = ipa_ctx->hdr_tbl.end;
+
+ if (mem->size == 0) {
+ IPAERR("hdr tbl empty\n");
+ return -EPERM;
+ }
+ IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end);
+
+ mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
+ memset(mem->base, 0, mem->size);
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ if (entry->is_hdr_proc_ctx)
+ continue;
+ IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
+ entry->offset_entry->offset);
+ memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
+ entry->hdr_len);
+ }
+
+ return 0;
+}
+
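+/**
+ * ipa_hdr_proc_ctx_to_hw_format() - fill a buffer with the HW format of
+ * all processing context entries
+ * @mem: [out] buffer holding the processing context table
+ * @hdr_base_addr: base address of the header table the contexts point to
+ */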
+static void ipa_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
+ u32 hdr_base_addr)
+{
+ struct ipa_hdr_proc_ctx_entry *entry;
+
+ list_for_each_entry(entry,
+ &ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+ link) {
+ IPADBG("processing type %d ofst=%d\n",
+ entry->type, entry->offset_entry->offset);
+ if (entry->type == IPA_HDR_PROC_NONE) {
+ struct ipa_hdr_proc_ctx_add_hdr_seq *ctx;
+
+ ctx = (struct ipa_hdr_proc_ctx_add_hdr_seq *)
+ (mem->base + entry->offset_entry->offset);
+ ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+ ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.value = entry->hdr->hdr_len;
+ ctx->hdr_add.hdr_addr = (entry->hdr->is_hdr_proc_ctx) ?
+ entry->hdr->phys_base :
+ hdr_base_addr +
+ entry->hdr->offset_entry->offset;
+ IPADBG("header address 0x%x\n",
+ ctx->hdr_add.hdr_addr);
+ ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+ ctx->end.length = 0;
+ ctx->end.value = 0;
+ } else {
+ struct ipa_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
+
+ ctx = (struct ipa_hdr_proc_ctx_add_hdr_cmd_seq *)
+ (mem->base + entry->offset_entry->offset);
+ ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+ ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.value = entry->hdr->hdr_len;
+ ctx->hdr_add.hdr_addr = (entry->hdr->is_hdr_proc_ctx) ?
+ entry->hdr->phys_base :
+ hdr_base_addr +
+ entry->hdr->offset_entry->offset;
+ IPADBG("header address 0x%x\n",
+ ctx->hdr_add.hdr_addr);
+ ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+ ctx->cmd.length = 0;
+ if (entry->type == IPA_HDR_PROC_ETHII_TO_ETHII)
+ ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;
+ else if (entry->type == IPA_HDR_PROC_ETHII_TO_802_3)
+ ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3;
+ else if (entry->type == IPA_HDR_PROC_802_3_TO_ETHII)
+ ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
+ else if (entry->type == IPA_HDR_PROC_802_3_TO_802_3)
+ ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
+ IPADBG("command id %d\n", ctx->cmd.value);
+ ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+ ctx->end.length = 0;
+ ctx->end.value = 0;
+ }
+ }
+}
+
+/**
+ * ipa_generate_hdr_proc_ctx_hw_tbl() -
+ * generates the headers processing context table.
+ * @hdr_sys_addr: [in] system memory address of the header table
+ * @mem: [out] buffer to put the processing context table
+ * @aligned_mem: [out] actual processing context table (with alignment).
+ * The processing context table must be 8-byte aligned.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
+ struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
+{
+ u32 hdr_base_addr;
+
+ mem->size = (ipa_ctx->hdr_proc_ctx_tbl.end) ? : 4;
+
+ /* make sure table is aligned */
+ mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+
+ IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_proc_ctx_tbl.end);
+
+ mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
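+	/*
+	 * carve the aligned table out of the over-allocated buffer; the
+	 * virtual and physical bases are advanced by the same delta so
+	 * they stay consistent
+	 */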
+ aligned_mem->phys_base =
+ IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
+ aligned_mem->base = mem->base +
+ (aligned_mem->phys_base - mem->phys_base);
+ aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+ memset(aligned_mem->base, 0, aligned_mem->size);
+ hdr_base_addr = (ipa_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
+ hdr_sys_addr;
+ ipa_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
+
+ return 0;
+}
+
+/*
+ * __ipa_commit_hdr_v1_1() - commits the header table to the IPA HW
+ * This function needs to be called with a locked mutex.
+ */
+int __ipa_commit_hdr_v1_1(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ struct ipa_hdr_init_local *cmd;
+ u16 len;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+	/* the immediate command param size is the same for local and system */
+ len = sizeof(struct ipa_hdr_init_local);
+
+ /*
+ * we can use init_local ptr for init_system due to layout of the
+ * struct
+ */
+ cmd = kmalloc(len, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_hdr_hw_tbl(mem)) {
+ IPAERR("fail to generate HDR HW TBL\n");
+ goto fail_hw_tbl_gen;
+ }
+
+ if (ipa_ctx->hdr_tbl_lcl) {
+ if (mem->size > IPA_MEM_v1_RAM_HDR_SIZE) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size,
+ IPA_MEM_v1_RAM_HDR_SIZE);
+ goto fail_send_cmd;
+ }
+ } else {
+ if (mem->size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size,
+ IPA_MEM_PART(apps_hdr_size_ddr));
+ goto fail_send_cmd;
+ }
+ }
+
+ cmd->hdr_table_src_addr = mem->phys_base;
+ if (ipa_ctx->hdr_tbl_lcl) {
+ cmd->size_hdr_table = mem->size;
+ cmd->hdr_table_dst_addr = IPA_MEM_v1_RAM_HDR_OFST;
+ desc.opcode = IPA_HDR_INIT_LOCAL;
+ } else {
+ desc.opcode = IPA_HDR_INIT_SYSTEM;
+ }
+ desc.pyld = cmd;
+ desc.len = sizeof(struct ipa_hdr_init_local);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ if (ipa_ctx->hdr_tbl_lcl) {
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+ mem->phys_base);
+ } else {
+ if (ipa_ctx->hdr_mem.phys_base) {
+ dma_free_coherent(ipa_ctx->pdev, ipa_ctx->hdr_mem.size,
+ ipa_ctx->hdr_mem.base,
+ ipa_ctx->hdr_mem.phys_base);
+ }
+ ipa_ctx->hdr_mem = *mem;
+ }
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->base)
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+ mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+
+ return -EPERM;
+}
+
+int __ipa_commit_hdr_v2(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipa_hdr_init_system cmd;
+ struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd;
+ int rc = -EFAULT;
+
+ if (ipa_generate_hdr_hw_tbl(&mem)) {
+ IPAERR("fail to generate HDR HW TBL\n");
+ goto end;
+ }
+
+ if (ipa_ctx->hdr_tbl_lcl) {
+ if (mem.size > IPA_MEM_PART(apps_hdr_size)) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem.size,
+ IPA_MEM_PART(apps_hdr_size));
+ goto end;
+ } else {
+ dma_cmd.system_addr = mem.phys_base;
+ dma_cmd.size = mem.size;
+ dma_cmd.local_addr = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_hdr_ofst);
+ desc.opcode = IPA_DMA_SHARED_MEM;
+ desc.pyld = &dma_cmd;
+ desc.len =
+ sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ }
+ } else {
+ if (mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem.size,
+ IPA_MEM_PART(apps_hdr_size_ddr));
+ goto end;
+ } else {
+ cmd.hdr_table_addr = mem.phys_base;
+ desc.opcode = IPA_HDR_INIT_SYSTEM;
+ desc.pyld = &cmd;
+ desc.len = sizeof(struct ipa_hdr_init_system);
+ }
+ }
+
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa_send_cmd(1, &desc))
+ IPAERR("fail to send immediate command\n");
+ else
+ rc = 0;
+
+ if (ipa_ctx->hdr_tbl_lcl) {
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
+ mem.phys_base);
+ } else {
+ if (!rc) {
+ if (ipa_ctx->hdr_mem.phys_base)
+ dma_free_coherent(ipa_ctx->pdev,
+ ipa_ctx->hdr_mem.size,
+ ipa_ctx->hdr_mem.base,
+ ipa_ctx->hdr_mem.phys_base);
+ ipa_ctx->hdr_mem = mem;
+ }
+ }
+
+end:
+ return rc;
+}
+
+int __ipa_commit_hdr_v2_5(void)
+{
+ struct ipa_desc desc[2];
+ struct ipa_mem_buffer hdr_mem;
+ struct ipa_mem_buffer ctx_mem;
+ struct ipa_mem_buffer aligned_ctx_mem;
+ struct ipa_hdr_init_system hdr_init_cmd = {0};
+ struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
+ struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
+ struct ipa_register_write reg_write_cmd = {0};
+ int rc = -EFAULT;
+ u32 proc_ctx_size;
+ u32 proc_ctx_ofst;
+ u32 proc_ctx_size_ddr;
+
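+	/*
+	 * desc[0] commits the header table and desc[1] commits the
+	 * processing context table; both are sent in a single
+	 * ipa_send_cmd() transaction.
+	 */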
+ memset(desc, 0, 2 * sizeof(struct ipa_desc));
+
+ if (ipa_generate_hdr_hw_tbl(&hdr_mem)) {
+ IPAERR("fail to generate HDR HW TBL\n");
+ goto end;
+ }
+
+ if (ipa_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
+ &aligned_ctx_mem)) {
+ IPAERR("fail to generate HDR PROC CTX HW TBL\n");
+ goto end;
+ }
+
+ if (ipa_ctx->hdr_tbl_lcl) {
+ if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
+ IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+ IPA_MEM_PART(apps_hdr_size));
+ goto end;
+ } else {
+ dma_cmd_hdr.system_addr = hdr_mem.phys_base;
+ dma_cmd_hdr.size = hdr_mem.size;
+ dma_cmd_hdr.local_addr =
+ ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_hdr_ofst);
+ desc[0].opcode = IPA_DMA_SHARED_MEM;
+ desc[0].pyld = &dma_cmd_hdr;
+ desc[0].len =
+ sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ }
+ } else {
+ if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+ IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+ IPA_MEM_PART(apps_hdr_size_ddr));
+ goto end;
+ } else {
+ hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
+ desc[0].opcode = IPA_HDR_INIT_SYSTEM;
+ desc[0].pyld = &hdr_init_cmd;
+ desc[0].len = sizeof(struct ipa_hdr_init_system);
+ }
+ }
+ desc[0].type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+
+ proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
+ proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
+ if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
+ if (aligned_ctx_mem.size > proc_ctx_size) {
+ IPAERR("tbl too big needed %d avail %d\n",
+ aligned_ctx_mem.size,
+ proc_ctx_size);
+ goto end;
+ } else {
+ dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
+ dma_cmd_ctx.size = aligned_ctx_mem.size;
+ dma_cmd_ctx.local_addr =
+ ipa_ctx->smem_restricted_bytes +
+ proc_ctx_ofst;
+ desc[1].opcode = IPA_DMA_SHARED_MEM;
+ desc[1].pyld = &dma_cmd_ctx;
+ desc[1].len =
+ sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ }
+ } else {
+ proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+ if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
+ IPAERR("tbl too big, needed %d avail %d\n",
+ aligned_ctx_mem.size,
+ proc_ctx_size_ddr);
+ goto end;
+ } else {
+ reg_write_cmd.offset = IPA_SYS_PKT_PROC_CNTXT_BASE_OFST;
+ reg_write_cmd.value = aligned_ctx_mem.phys_base;
+ reg_write_cmd.value_mask =
+ ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
+			desc[1].pyld = &reg_write_cmd;
+ desc[1].opcode = IPA_REGISTER_WRITE;
+ desc[1].len = sizeof(reg_write_cmd);
+ }
+ }
+ desc[1].type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
+
+ if (ipa_send_cmd(2, desc))
+ IPAERR("fail to send immediate command\n");
+ else
+ rc = 0;
+
+ if (ipa_ctx->hdr_tbl_lcl) {
+ dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, hdr_mem.base,
+ hdr_mem.phys_base);
+ } else {
+ if (!rc) {
+ if (ipa_ctx->hdr_mem.phys_base)
+ dma_free_coherent(ipa_ctx->pdev,
+ ipa_ctx->hdr_mem.size,
+ ipa_ctx->hdr_mem.base,
+ ipa_ctx->hdr_mem.phys_base);
+ ipa_ctx->hdr_mem = hdr_mem;
+ }
+ }
+
+ if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
+ dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, ctx_mem.base,
+ ctx_mem.phys_base);
+ } else {
+ if (!rc) {
+ if (ipa_ctx->hdr_proc_ctx_mem.phys_base)
+ dma_free_coherent(ipa_ctx->pdev,
+ ipa_ctx->hdr_proc_ctx_mem.size,
+ ipa_ctx->hdr_proc_ctx_mem.base,
+ ipa_ctx->hdr_proc_ctx_mem.phys_base);
+ ipa_ctx->hdr_proc_ctx_mem = ctx_mem;
+ }
+ }
+
+end:
+ return rc;
+}
+
+/**
+ * __ipa_commit_hdr_v2_6L() - Commits a header to the IPA HW.
+ *
+ * This function needs to be called with a locked mutex.
+ */
+int __ipa_commit_hdr_v2_6L(void)
+{
+ /* Same implementation as IPAv2 */
+ return __ipa_commit_hdr_v2();
+}
+
+static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
+ bool add_ref_hdr)
+{
+ struct ipa_hdr_entry *hdr_entry;
+ struct ipa_hdr_proc_ctx_entry *entry;
+ struct ipa_hdr_proc_ctx_offset_entry *offset;
+ u32 bin;
+ struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
+ int id;
+ int needed_len;
+ int mem_size;
+
+ IPADBG("processing type %d hdr_hdl %d\n",
+ proc_ctx->type, proc_ctx->hdr_hdl);
+
+ if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
+ IPAERR("invalid processing type %d\n", proc_ctx->type);
+ return -EINVAL;
+ }
+
+ hdr_entry = ipa_id_find(proc_ctx->hdr_hdl);
+ if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
+ IPAERR("hdr_hdl is invalid\n");
+ return -EINVAL;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc proc_ctx object\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&entry->link);
+
+ entry->type = proc_ctx->type;
+ entry->hdr = hdr_entry;
+ if (add_ref_hdr)
+ hdr_entry->ref_cnt++;
+ entry->cookie = IPA_COOKIE;
+
+ needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ?
+ sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) :
+ sizeof(struct ipa_hdr_proc_ctx_add_hdr_cmd_seq);
+
+ if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
+ bin = IPA_HDR_PROC_CTX_BIN0;
+ } else if (needed_len <=
+ ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
+ bin = IPA_HDR_PROC_CTX_BIN1;
+ } else {
+ IPAERR("unexpected needed len %d\n", needed_len);
+ WARN_ON(1);
+ goto bad_len;
+ }
+
+ mem_size = (ipa_ctx->hdr_proc_ctx_tbl_lcl) ?
+ IPA_MEM_PART(apps_hdr_proc_ctx_size) :
+ IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+ if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
+ IPAERR("hdr proc ctx table overflow\n");
+ goto bad_len;
+ }
+
+ if (list_empty(&htbl->head_free_offset_list[bin])) {
+ offset = kmem_cache_zalloc(ipa_ctx->hdr_proc_ctx_offset_cache,
+ GFP_KERNEL);
+ if (!offset) {
+ IPAERR("failed to alloc offset object\n");
+ goto bad_len;
+ }
+ INIT_LIST_HEAD(&offset->link);
+		/*
+		 * first grow of this bin: the bin and offset assigned here
+		 * are fixed for the lifetime of the table
+		 */
+ offset->offset = htbl->end;
+ offset->bin = bin;
+ htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
+ list_add(&offset->link,
+ &htbl->head_offset_list[bin]);
+ } else {
+ /* get the first free slot */
+ offset =
+ list_first_entry(&htbl->head_free_offset_list[bin],
+ struct ipa_hdr_proc_ctx_offset_entry, link);
+ list_move(&offset->link, &htbl->head_offset_list[bin]);
+ }
+
+ entry->offset_entry = offset;
+ list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
+ htbl->proc_ctx_cnt++;
+ IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
+ htbl->proc_ctx_cnt, offset->offset);
+
+ id = ipa_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to alloc id\n");
+ WARN_ON(1);
+ }
+ entry->id = id;
+ proc_ctx->proc_ctx_hdl = id;
+ entry->ref_cnt++;
+
+ return 0;
+
+bad_len:
+ if (add_ref_hdr)
+ hdr_entry->ref_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry);
+ return -EPERM;
+}
+
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+ struct ipa_hdr_entry *entry;
+ struct ipa_hdr_offset_entry *offset;
+ u32 bin;
+ struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+ int id;
+ int mem_size;
+
+ if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
+ IPAERR("bad parm\n");
+ goto error;
+ }
+
+ if (!HDR_TYPE_IS_VALID(hdr->type)) {
+ IPAERR("invalid hdr type %d\n", hdr->type);
+ goto error;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc hdr object\n");
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&entry->link);
+
+ memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+ entry->hdr_len = hdr->hdr_len;
+ strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+ entry->is_partial = hdr->is_partial;
+ entry->type = hdr->type;
+ entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
+ entry->eth2_ofst = hdr->eth2_ofst;
+ entry->cookie = IPA_COOKIE;
+
+ if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+ bin = IPA_HDR_BIN0;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+ bin = IPA_HDR_BIN1;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+ bin = IPA_HDR_BIN2;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+ bin = IPA_HDR_BIN3;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
+ bin = IPA_HDR_BIN4;
+ else {
+ IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ goto bad_hdr_len;
+ }
+
+ mem_size = (ipa_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
+ IPA_MEM_PART(apps_hdr_size_ddr);
+
+ /*
+ * if header does not fit to table, place it in DDR
+ * This is valid for IPA 2.5 and on,
+ * with the exception of IPA2.6L.
+ */
+ if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
+ if (ipa_ctx->ipa_hw_type != IPA_HW_v2_5) {
+ IPAERR("not enough room for header\n");
+ goto bad_hdr_len;
+ } else {
+ entry->is_hdr_proc_ctx = true;
+ entry->phys_base = dma_map_single(ipa_ctx->pdev,
+ entry->hdr,
+ entry->hdr_len,
+ DMA_TO_DEVICE);
+ }
+ } else {
+ entry->is_hdr_proc_ctx = false;
+ if (list_empty(&htbl->head_free_offset_list[bin])) {
+ offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache,
+ GFP_KERNEL);
+ if (!offset) {
+ IPAERR("failed to alloc hdr offset object\n");
+ goto bad_hdr_len;
+ }
+ INIT_LIST_HEAD(&offset->link);
+			/*
+			 * first grow of this bin: the bin and offset
+			 * assigned here are fixed for the lifetime of
+			 * the table
+			 */
+ offset->offset = htbl->end;
+ offset->bin = bin;
+ htbl->end += ipa_hdr_bin_sz[bin];
+ list_add(&offset->link,
+ &htbl->head_offset_list[bin]);
+ } else {
+ /* get the first free slot */
+ offset =
+ list_first_entry(&htbl->head_free_offset_list[bin],
+ struct ipa_hdr_offset_entry, link);
+ list_move(&offset->link, &htbl->head_offset_list[bin]);
+ }
+
+ entry->offset_entry = offset;
+ }
+
+ list_add(&entry->link, &htbl->head_hdr_entry_list);
+ htbl->hdr_cnt++;
+ if (entry->is_hdr_proc_ctx)
+ IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+ hdr->hdr_len,
+ htbl->hdr_cnt,
+ &entry->phys_base);
+ else
+ IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
+ hdr->hdr_len,
+ htbl->hdr_cnt,
+ entry->offset_entry->offset);
+
+ id = ipa_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to alloc id\n");
+ WARN_ON(1);
+ }
+ entry->id = id;
+ hdr->hdr_hdl = id;
+ entry->ref_cnt++;
+
+ if (entry->is_hdr_proc_ctx) {
+ struct ipa_hdr_proc_ctx_add proc_ctx;
+
+ IPADBG("adding processing context for header %s\n", hdr->name);
+ proc_ctx.type = IPA_HDR_PROC_NONE;
+ proc_ctx.hdr_hdl = id;
+ if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
+ IPAERR("failed to add hdr proc ctx\n");
+ goto fail_add_proc_ctx;
+ }
+ entry->proc_ctx = ipa_id_find(proc_ctx.proc_ctx_hdl);
+ }
+
+ return 0;
+
+fail_add_proc_ctx:
+ entry->ref_cnt--;
+ hdr->hdr_hdl = 0;
+ ipa_id_remove(id);
+ htbl->hdr_cnt--;
+ list_del(&entry->link);
+ dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+bad_hdr_len:
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+error:
+ return -EPERM;
+}
+
+static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
+{
+ struct ipa_hdr_proc_ctx_entry *entry;
+ struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
+
+ entry = ipa_id_find(proc_ctx_hdl);
+ if (!entry || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+	IPADBG("del proc ctx cnt=%d ofst=%d\n",
+ htbl->proc_ctx_cnt, entry->offset_entry->offset);
+
+ if (--entry->ref_cnt) {
+ IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
+ proc_ctx_hdl, entry->ref_cnt);
+ return 0;
+ }
+
+ if (release_hdr)
+ __ipa_del_hdr(entry->hdr->id);
+
+ /* move the offset entry to appropriate free list */
+ list_move(&entry->offset_entry->link,
+ &htbl->head_free_offset_list[entry->offset_entry->bin]);
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry);
+
+ /* remove the handle from the database */
+ ipa_id_remove(proc_ctx_hdl);
+
+ return 0;
+}
+
+
+int __ipa_del_hdr(u32 hdr_hdl)
+{
+ struct ipa_hdr_entry *entry;
+ struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+ entry = ipa_id_find(hdr_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ return -EINVAL;
+ }
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+ if (entry->is_hdr_proc_ctx)
+ IPADBG("del hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+ entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
+ else
+ IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
+ htbl->hdr_cnt, entry->offset_entry->offset);
+
+ if (--entry->ref_cnt) {
+ IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
+ return 0;
+ }
+
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa_ctx->pdev,
+ entry->phys_base,
+ entry->hdr_len,
+ DMA_TO_DEVICE);
+ __ipa_del_hdr_proc_ctx(entry->proc_ctx->id, false);
+ } else {
+ /* move the offset entry to appropriate free list */
+ list_move(&entry->offset_entry->link,
+ &htbl->head_free_offset_list[entry->offset_entry->bin]);
+ }
+ list_del(&entry->link);
+ htbl->hdr_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+ /* remove the handle from the database */
+ ipa_id_remove(hdr_hdl);
+
+ return 0;
+}
+
+/**
+ * ipa2_add_hdr() - add the specified headers to SW and optionally commit them
+ * to IPA HW
+ * @hdrs: [inout] set of headers to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (hdrs == NULL || hdrs->num_hdrs == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("adding %d headers to IPA driver internal data struct\n",
+ hdrs->num_hdrs);
+ for (i = 0; i < hdrs->num_hdrs; i++) {
+ if (__ipa_add_hdr(&hdrs->hdr[i])) {
+ IPAERR("failed to add hdr %d\n", i);
+ hdrs->hdr[i].status = -1;
+ } else {
+ hdrs->hdr[i].status = 0;
+ }
+ }
+
+ if (hdrs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
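+
+/*
+ * Usage sketch (illustrative only): add a single header and commit it in
+ * the same call. Field names follow struct ipa_ioc_add_hdr/ipa_hdr_add;
+ * "eth_hdr_template" and "saved_hdl" are hypothetical caller-owned names.
+ *
+ *	struct ipa_ioc_add_hdr *hdrs;
+ *
+ *	hdrs = kzalloc(sizeof(*hdrs) + sizeof(struct ipa_hdr_add),
+ *		       GFP_KERNEL);
+ *	if (!hdrs)
+ *		return -ENOMEM;
+ *	hdrs->commit = 1;
+ *	hdrs->num_hdrs = 1;
+ *	hdrs->hdr[0].hdr_len = ETH_HLEN;
+ *	strlcpy(hdrs->hdr[0].name, "my_eth_hdr", IPA_RESOURCE_NAME_MAX);
+ *	memcpy(hdrs->hdr[0].hdr, eth_hdr_template, ETH_HLEN);
+ *	if (!ipa2_add_hdr(hdrs))
+ *		saved_hdl = hdrs->hdr[0].hdr_hdl;
+ *	kfree(hdrs);
+ */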
+
+/**
+ * ipa2_del_hdr() - Remove the specified headers from SW and optionally commit
+ * them to IPA HW
+ * @hdls: [inout] set of headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (hdls == NULL || hdls->num_hdls == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_hdr(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del hdr %i\n", i);
+ hdls->hdl[i].status = -1;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit) {
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa2_add_hdr_proc_ctx() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @proc_ctxs: [inout] set of processing context headers to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
+ ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
+ IPAERR("Processing context not supported on IPA HW %d\n",
+ ipa_ctx->ipa_hw_type);
+ return -EFAULT;
+ }
+
+ if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+	IPADBG("adding %d header processing contexts to IPA driver\n",
+ proc_ctxs->num_proc_ctxs);
+ for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
+ if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
+		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
+			IPAERR("failed to add hdr proc ctx %d\n", i);
+ proc_ctxs->proc_ctx[i].status = -1;
+ } else {
+ proc_ctxs->proc_ctx[i].status = 0;
+ }
+ }
+
+ if (proc_ctxs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa2_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls: [inout] set of processing context headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+ int i;
+ int result;
+
+ if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
+ ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
+ IPAERR("Processing context not supported on IPA HW %d\n",
+ ipa_ctx->ipa_hw_type);
+ return -EFAULT;
+ }
+
+ if (hdls == NULL || hdls->num_hdls == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_hdr_proc_ctx(hdls->hdl[i].hdl, true)) {
+ IPAERR("failed to del hdr %i\n", i);
+ hdls->hdl[i].status = -1;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit) {
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa2_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_commit_hdr(void)
+{
+ int result = -EFAULT;
+
+ /*
+ * issue a commit on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa2_commit_rt(IPA_IP_v4))
+ return -EPERM;
+ if (ipa2_commit_rt(IPA_IP_v6))
+ return -EPERM;
+
+ mutex_lock(&ipa_ctx->lock);
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa2_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_reset_hdr(void)
+{
+ struct ipa_hdr_entry *entry;
+ struct ipa_hdr_entry *next;
+ struct ipa_hdr_proc_ctx_entry *ctx_entry;
+ struct ipa_hdr_proc_ctx_entry *ctx_next;
+ struct ipa_hdr_offset_entry *off_entry;
+ struct ipa_hdr_offset_entry *off_next;
+ struct ipa_hdr_proc_ctx_offset_entry *ctx_off_entry;
+ struct ipa_hdr_proc_ctx_offset_entry *ctx_off_next;
+ int i;
+
+ /*
+ * issue a reset on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa2_reset_rt(IPA_IP_v4))
+ IPAERR("fail to reset v4 rt\n");
+ if (ipa2_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v6 rt\n");
+
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset hdr\n");
+ list_for_each_entry_safe(entry, next,
+ &ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+ /* do not remove the default header */
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ if (entry->is_hdr_proc_ctx) {
+ mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
+ IPAERR("default header is proc ctx\n");
+ return -EFAULT;
+ }
+ continue;
+ }
+
+ if (ipa_id_find(entry->id) == NULL) {
+ mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa_ctx->pdev,
+ entry->phys_base,
+ entry->hdr_len,
+ DMA_TO_DEVICE);
+ entry->proc_ctx = NULL;
+ }
+ list_del(&entry->link);
+ entry->ref_cnt = 0;
+ entry->cookie = 0;
+
+ /* remove the handle from the database */
+ ipa_id_remove(entry->id);
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+ }
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ list_for_each_entry_safe(off_entry, off_next,
+ &ipa_ctx->hdr_tbl.head_offset_list[i],
+ link) {
+
+ /*
+ * do not remove the default exception header which is
+ * at offset 0
+ */
+ if (off_entry->offset == 0)
+ continue;
+
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+ }
+ list_for_each_entry_safe(off_entry, off_next,
+ &ipa_ctx->hdr_tbl.head_free_offset_list[i],
+ link) {
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+ }
+ }
+	/* only the default exception header (8 bytes, at offset 0) remains */
+ ipa_ctx->hdr_tbl.end = 8;
+ ipa_ctx->hdr_tbl.hdr_cnt = 1;
+
+ IPADBG("reset hdr proc ctx\n");
+ list_for_each_entry_safe(
+ ctx_entry,
+ ctx_next,
+ &ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+ link) {
+
+ if (ipa_id_find(ctx_entry->id) == NULL) {
+ mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ list_del(&ctx_entry->link);
+ ctx_entry->ref_cnt = 0;
+ ctx_entry->cookie = 0;
+
+ /* remove the handle from the database */
+ ipa_id_remove(ctx_entry->id);
+ kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, ctx_entry);
+
+ }
+ for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+ list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+ &ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
+ link) {
+
+ list_del(&ctx_off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_proc_ctx_offset_cache,
+ ctx_off_entry);
+ }
+ list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+ &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
+ link) {
+ list_del(&ctx_off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_proc_ctx_offset_cache,
+ ctx_off_entry);
+ }
+ }
+ ipa_ctx->hdr_proc_ctx_tbl.end = 0;
+ ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+
+static struct ipa_hdr_entry *__ipa_find_hdr(const char *name)
+{
+ struct ipa_hdr_entry *entry;
+
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ if (!strcmp(name, entry->name))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * ipa2_get_hdr() - Lookup the specified header resource
+ * @lookup: [inout] header to lookup and its handle
+ *
+ * look up the specified header resource and return its handle if it exists
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+ struct ipa_hdr_entry *entry;
+ int result = -1;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (lookup == NULL) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_find_hdr(lookup->name);
+ if (entry) {
+ lookup->hdl = entry->id;
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+
+/**
+ * __ipa_release_hdr() - drop reference to header and cause
+ * deletion if reference count permits
+ * @hdr_hdl: [in] handle of header to be released
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int __ipa_release_hdr(u32 hdr_hdl)
+{
+ int result = 0;
+
+ if (__ipa_del_hdr(hdr_hdl)) {
+ IPADBG("fail to del hdr %x\n", hdr_hdl);
+ result = -EFAULT;
+ goto bail;
+ }
+
+ /* commit for put */
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ IPAERR("fail to commit hdr\n");
+ result = -EFAULT;
+ goto bail;
+ }
+
+bail:
+ return result;
+}
+
+/**
+ * __ipa_release_hdr_proc_ctx() - drop reference to processing context
+ * and cause deletion if reference count permits
+ * @proc_ctx_hdl: [in] handle of processing context to be released
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int __ipa_release_hdr_proc_ctx(u32 proc_ctx_hdl)
+{
+ int result = 0;
+
+ if (__ipa_del_hdr_proc_ctx(proc_ctx_hdl, true)) {
+ IPADBG("fail to del hdr %x\n", proc_ctx_hdl);
+ result = -EFAULT;
+ goto bail;
+ }
+
+ /* commit for put */
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ IPAERR("fail to commit hdr\n");
+ result = -EFAULT;
+ goto bail;
+ }
+
+bail:
+ return result;
+}
+
+/**
+ * ipa2_put_hdr() - Release the specified header handle
+ * @hdr_hdl: [in] the header handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_put_hdr(u32 hdr_hdl)
+{
+ struct ipa_hdr_entry *entry;
+ int result = -EFAULT;
+
+ mutex_lock(&ipa_ctx->lock);
+
+ entry = ipa_id_find(hdr_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ result = -EINVAL;
+ goto bail;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("invalid header entry\n");
+ result = -EINVAL;
+ goto bail;
+ }
+
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa2_copy_hdr() - Lookup the specified header resource and return a copy of
+ * it
+ * @copy: [inout] header to lookup and its copy
+ *
+ * look up the specified header resource and return a copy of it (along with
+ * its attributes) if it exists; this is typically called for partial headers
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+ struct ipa_hdr_entry *entry;
+ int result = -EFAULT;
+
+ if (copy == NULL) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_find_hdr(copy->name);
+ if (entry) {
+ memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+ copy->hdr_len = entry->hdr_len;
+ copy->type = entry->type;
+ copy->is_partial = entry->is_partial;
+ copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
+ copy->eth2_ofst = entry->eth2_ofst;
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h
new file mode 100644
index 0000000..f12a3c6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h
@@ -0,0 +1,450 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW-related data types */
+
+/* immediate command op-codes */
+#define IPA_DECIPH_INIT (1)
+#define IPA_PPP_FRM_INIT (2)
+#define IPA_IP_V4_FILTER_INIT (3)
+#define IPA_IP_V6_FILTER_INIT (4)
+#define IPA_IP_V4_NAT_INIT (5)
+#define IPA_IP_V6_NAT_INIT (6)
+#define IPA_IP_V4_ROUTING_INIT (7)
+#define IPA_IP_V6_ROUTING_INIT (8)
+#define IPA_HDR_INIT_LOCAL (9)
+#define IPA_HDR_INIT_SYSTEM (10)
+#define IPA_DECIPH_SETUP (11)
+#define IPA_REGISTER_WRITE (12)
+#define IPA_NAT_DMA (14)
+#define IPA_IP_PACKET_TAG (15)
+#define IPA_IP_PACKET_INIT (16)
+#define IPA_DMA_SHARED_MEM (19)
+#define IPA_IP_PACKET_TAG_STATUS (20)
+
+/* Processing context TLV type */
+#define IPA_PROC_CTX_TLV_TYPE_END 0
+#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
+#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3
+
+
+/**
+ * struct ipa_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post routing action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: set to add back to the packet the header removed
+ *  as part of header removal; this is done by the
+ *  header insertion block
+ * @to_uc: direct IPA to send the packet to the uC instead of
+ *  the intended destination. This is performed just after
+ *  routing block processing, so routing will have determined the
+ *  destination end point and the uC will receive this information
+ *  together with the packet as part of the HW packet TX commands
+ * @rsvd: reserved bits
+ */
+struct ipa_flt_rule_hw_hdr {
+ union {
+ u32 word;
+ struct {
+ u32 en_rule:16;
+ u32 action:5;
+ u32 rt_tbl_idx:5;
+ u32 retain_hdr:1;
+ u32 to_uc:1;
+ u32 rsvd:4;
+ } hdr;
+ } u;
+};
+
+/**
+ * struct ipa_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @pipe_dest_idx: destination pipe index
+ * @system: changed from local to system due to HW change
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ * header processing context table
+ */
+struct ipa_rt_rule_hw_hdr {
+ union {
+ u32 word;
+ struct {
+ u32 en_rule:16;
+ u32 pipe_dest_idx:5;
+ u32 system:1;
+ u32 hdr_offset:10;
+ } hdr;
+ struct {
+ u32 en_rule:16;
+ u32 pipe_dest_idx:5;
+ u32 system:1;
+ u32 hdr_offset:9;
+ u32 proc_ctx:1;
+ } hdr_v2_5;
+ } u;
+};
+
+/**
+ * struct ipa_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_filter_init {
+ u64 ipv4_rules_addr:32;
+ u64 size_ipv4_rules:12;
+ u64 ipv4_addr:16;
+ u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_filter_init {
+ u64 ipv6_rules_addr:32;
+ u64 size_ipv6_rules:16;
+ u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_routing_init {
+ u64 ipv4_rules_addr:32;
+ u64 size_ipv4_rules:12;
+ u64 ipv4_addr:16;
+ u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_routing_init {
+ u64 ipv6_rules_addr:32;
+ u64 size_ipv6_rules:16;
+ u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_hdr_init_local - IPA_HDR_INIT_LOCAL command payload
+ * @hdr_table_src_addr: word address of header table in system memory where the
+ * table starts (use as source for memory copying)
+ * @size_hdr_table: size of the above (in bytes)
+ * @hdr_table_dst_addr: header address in IPA sram (used as dst for memory copy)
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_local {
+ u64 hdr_table_src_addr:32;
+ u64 size_hdr_table:12;
+ u64 hdr_table_dst_addr:16;
+ u64 rsvd:4;
+};
+
+/**
+ * struct ipa_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload
+ * @hdr_table_addr: word address of header table in system memory where the
+ * table starts (use as source for memory copying)
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_system {
+ u64 hdr_table_addr:32;
+ u64 rsvd:32;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_tlv -
+ * HW structure of IPA processing context header - TLV part
+ * @type: 0 - end type
+ * 1 - header addition type
+ * 3 - processing command type
+ * @length: number of bytes after tlv
+ * for type:
+ * 0 - needs to be 0
+ * 1 - header addition length
+ * 3 - number of 32B including type and length.
+ * @value: specific value for type
+ * for type:
+ * 0 - needs to be 0
+ * 1 - header length
+ * 3 - command ID (see IPA_HDR_UCP_* definitions)
+ */
+struct ipa_hdr_proc_ctx_tlv {
+ u32 type:8;
+ u32 length:8;
+ u32 value:16;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_hdr_add -
+ * HW structure of IPA processing context - add header tlv
+ * @tlv: IPA processing context TLV
+ * @hdr_addr: processing context header address
+ */
+struct ipa_hdr_proc_ctx_hdr_add {
+ struct ipa_hdr_proc_ctx_tlv tlv;
+ u32 hdr_addr;
+};
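+
+/*
+ * Layout sketch: an IPA_HDR_PROC_NONE sequence for a 14-byte header is a
+ * hdr_add TLV of {type = 1, length = 1, value = 14}, followed by
+ * hdr_add.hdr_addr, terminated by an end TLV of {0, 0, 0}.
+ */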
+
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP BIT(7)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT BIT(6)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT BIT(5)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL BIT(2)
+
+/**
+ * struct ipa_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags
+ * @metadata: metadata
+ *
+ * The A5 MUX header is big-endian while the A5 runs little-endian; this
+ * struct definition allows the A5 SW to parse the header correctly.
+ */
+struct ipa_a5_mux_hdr {
+ u16 interface_id;
+ u8 src_pipe_index;
+ u8 flags;
+ u32 metadata;
+};
+
+/**
+ * struct ipa_register_write - IPA_REGISTER_WRITE command payload
+ * @rsvd: reserved
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear
+ * @offset: offset from IPA base address
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ */
+struct ipa_register_write {
+ u32 rsvd:15;
+ u32 skip_pipeline_clear:1;
+ u32 offset:16;
+ u32 value:32;
+ u32 value_mask:32;
+};
+
+/**
+ * struct ipa_nat_dma - IPA_NAT_DMA command payload
+ * @table_index: NAT table index
+ * @rsvd1: reserved
+ * @base_addr: base address
+ * @rsvd2: reserved
+ * @offset: offset
+ * @data: metadata
+ * @rsvd3: reserved
+ */
+struct ipa_nat_dma {
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 base_addr:2;
+ u64 rsvd2:2;
+ u64 offset:32;
+ u64 data:16;
+ u64 rsvd3:8;
+};
+
+/**
+ * struct ipa_ip_packet_init - IPA_IP_PACKET_INIT command payload
+ * @destination_pipe_index: destination pipe index
+ * @rsvd1: reserved
+ * @metadata: metadata
+ * @rsvd2: reserved
+ */
+struct ipa_ip_packet_init {
+ u64 destination_pipe_index:5;
+ u64 rsvd1:3;
+ u64 metadata:32;
+ u64 rsvd2:24;
+};
+
+/**
+ * struct ipa_ip_v4_nat_init - IPA_IP_V4_NAT_INIT command payload
+ * @ipv4_rules_addr: ipv4 rules address
+ * @ipv4_expansion_rules_addr: ipv4 expansion rules address
+ * @index_table_addr: index tables address
+ * @index_table_expansion_addr: index expansion table address
+ * @table_index: index in table
+ * @rsvd1: reserved
+ * @ipv4_rules_addr_type: ipv4 address type
+ * @ipv4_expansion_rules_addr_type: ipv4 expansion address type
+ * @index_table_addr_type: index table address type
+ * @index_table_expansion_addr_type: index expansion table type
+ * @size_base_tables: size of base tables
+ * @size_expansion_tables: size of expansion tables
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_ip_v4_nat_init {
+ u64 ipv4_rules_addr:32;
+ u64 ipv4_expansion_rules_addr:32;
+ u64 index_table_addr:32;
+ u64 index_table_expansion_addr:32;
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 ipv4_rules_addr_type:1;
+ u64 ipv4_expansion_rules_addr_type:1;
+ u64 index_table_addr_type:1;
+ u64 index_table_expansion_addr_type:1;
+ u64 size_base_tables:12;
+ u64 size_expansion_tables:10;
+ u64 rsvd2:2;
+ u64 public_ip_addr:32;
+};
+
+/**
+ * struct ipa_ip_packet_tag - IPA_IP_PACKET_TAG command payload
+ * @tag: tag value returned with response
+ */
+struct ipa_ip_packet_tag {
+ u32 tag;
+};
+
+/**
+ * struct ipa_ip_packet_tag_status - IPA_IP_PACKET_TAG_STATUS command payload
+ * @rsvd: reserved
+ * @tag_f_1: tag value returned within status
+ * @tag_f_2: tag value returned within status
+ */
+struct ipa_ip_packet_tag_status {
+ u32 rsvd:16;
+ u32 tag_f_1:16;
+ u32 tag_f_2:32;
+};
+
+/*! @brief Struct for the IPAv2.0 and IPAv2.5 UL packet status header */
+struct ipa_hw_pkt_status {
+ u32 status_opcode:8;
+ u32 exception:8;
+ u32 status_mask:16;
+ u32 pkt_len:16;
+ u32 endp_src_idx:5;
+ u32 reserved_1:3;
+ u32 endp_dest_idx:5;
+ u32 reserved_2:3;
+ u32 metadata:32;
+ union {
+ struct {
+ u32 filt_local:1;
+ u32 filt_global:1;
+ u32 filt_pipe_idx:5;
+ u32 filt_match:1;
+ u32 filt_rule_idx:6;
+ u32 ret_hdr:1;
+ u32 reserved_3:1;
+ u32 tag_f_1:16;
+
+ } ipa_hw_v2_0_pkt_status;
+ struct {
+ u32 filt_local:1;
+ u32 filt_global:1;
+ u32 filt_pipe_idx:5;
+ u32 ret_hdr:1;
+ u32 filt_rule_idx:8;
+ u32 tag_f_1:16;
+
+ } ipa_hw_v2_5_pkt_status;
+ };
+
+ u32 tag_f_2:32;
+ u32 time_day_ctr:32;
+ u32 nat_hit:1;
+ u32 nat_tbl_idx:13;
+ u32 nat_type:2;
+ u32 route_local:1;
+ u32 route_tbl_idx:5;
+ u32 route_match:1;
+ u32 ucp:1;
+ u32 route_rule_idx:8;
+ u32 hdr_local:1;
+ u32 hdr_offset:10;
+ u32 frag_hit:1;
+ u32 frag_rule:4;
+ u32 reserved_4:16;
+};
+
+#define IPA_PKT_STATUS_SIZE 32
+
+/*! @brief Status header opcodes */
+enum ipa_hw_status_opcode {
+ IPA_HW_STATUS_OPCODE_MIN,
+ IPA_HW_STATUS_OPCODE_PACKET = IPA_HW_STATUS_OPCODE_MIN,
+ IPA_HW_STATUS_OPCODE_NEW_FRAG_RULE,
+ IPA_HW_STATUS_OPCODE_DROPPED_PACKET,
+ IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET,
+ IPA_HW_STATUS_OPCODE_XLAT_PACKET = 6,
+ IPA_HW_STATUS_OPCODE_MAX
+};
+
+/*! @brief Possible Masks received in status */
+enum ipa_hw_pkt_status_mask {
+ IPA_HW_PKT_STATUS_MASK_FRAG_PROCESS = 0x1,
+ IPA_HW_PKT_STATUS_MASK_FILT_PROCESS = 0x2,
+ IPA_HW_PKT_STATUS_MASK_NAT_PROCESS = 0x4,
+ IPA_HW_PKT_STATUS_MASK_ROUTE_PROCESS = 0x8,
+ IPA_HW_PKT_STATUS_MASK_TAG_VALID = 0x10,
+ IPA_HW_PKT_STATUS_MASK_FRAGMENT = 0x20,
+ IPA_HW_PKT_STATUS_MASK_FIRST_FRAGMENT = 0x40,
+ IPA_HW_PKT_STATUS_MASK_V4 = 0x80,
+ IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS = 0x100,
+ IPA_HW_PKT_STATUS_MASK_AGGR_PROCESS = 0x200,
+ IPA_HW_PKT_STATUS_MASK_DEST_EOT = 0x400,
+ IPA_HW_PKT_STATUS_MASK_DEAGGR_PROCESS = 0x800,
+ IPA_HW_PKT_STATUS_MASK_DEAGG_FIRST = 0x1000,
+ IPA_HW_PKT_STATUS_MASK_SRC_EOT = 0x2000
+};
+
+/*! @brief Possible Exceptions received in status */
+enum ipa_hw_pkt_status_exception {
+ IPA_HW_PKT_STATUS_EXCEPTION_NONE = 0x0,
+ IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR = 0x1,
+ IPA_HW_PKT_STATUS_EXCEPTION_REPL = 0x2,
+ IPA_HW_PKT_STATUS_EXCEPTION_IPTYPE = 0x4,
+ IPA_HW_PKT_STATUS_EXCEPTION_IHL = 0x8,
+ IPA_HW_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10,
+ IPA_HW_PKT_STATUS_EXCEPTION_SW_FILT = 0x20,
+ IPA_HW_PKT_STATUS_EXCEPTION_NAT = 0x40,
+ IPA_HW_PKT_STATUS_EXCEPTION_ACTUAL_MAX,
+ IPA_HW_PKT_STATUS_EXCEPTION_MAX = 0xFF
+};
+
+/*! @brief IPA_HW_IMM_CMD_DMA_SHARED_MEM Immediate Command Parameters */
+struct ipa_hw_imm_cmd_dma_shared_mem {
+ u32 reserved_1:16;
+ u32 size:16;
+ u32 system_addr:32;
+ u32 local_addr:16;
+ u32 direction:1;
+ u32 skip_pipeline_clear:1;
+ u32 reserved_2:14;
+ u32 padding:32;
+};
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
new file mode 100644
index 0000000..b12fba8
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -0,0 +1,1838 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_I_H_
+#define _IPA_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/ipa.h>
+#include <linux/msm-sps.h>
+#include <linux/platform_device.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/ipa_uc_offload.h>
+#include "ipa_hw_defs.h"
+#include "ipa_ram_mmap.h"
+#include "ipa_reg.h"
+#include "ipa_qmi_service.h"
+#include "../ipa_api.h"
+#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
+
+#define DRV_NAME "ipa"
+#define NAT_DEV_NAME "ipaNatTable"
+#define IPA_COOKIE 0x57831603
+#define MTU_BYTE 1500
+
+#define IPA_MAX_NUM_PIPES 0x14
+#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
+#define IPA_SYS_DESC_FIFO_SZ 0x2000
+#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_LAN_RX_HEADER_LENGTH (2)
+#define IPA_QMAP_HEADER_LENGTH (4)
+#define IPA_DL_CHECKSUM_LENGTH (8)
+#define IPA_NUM_DESC_PER_SW_TX (2)
+#define IPA_GENERIC_RX_POOL_SZ 1000
+
+#define IPA_MAX_STATUS_STAT_NUM 30
+
+#define IPADBG(fmt, args...) \
+ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPAERR(fmt, args...) \
+ pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define WLAN_AMPDU_TX_EP 15
+#define WLAN_PROD_TX_EP 19
+#define WLAN1_CONS_RX_EP 14
+#define WLAN2_CONS_RX_EP 16
+#define WLAN3_CONS_RX_EP 17
+#define WLAN4_CONS_RX_EP 18
+
+#define MAX_NUM_EXCP 8
+
+#define IPA_STATS
+
+#ifdef IPA_STATS
+#define IPA_STATS_INC_CNT(val) (++val)
+#define IPA_STATS_DEC_CNT(val) (--val)
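+/*
+ * Bump one counter per exception flag set in @flags; a zero flags value
+ * is counted in the last bucket.
+ */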
+#define IPA_STATS_EXCP_CNT(flags, base) do { \
+ int i; \
+ for (i = 0; i < MAX_NUM_EXCP; i++) \
+ if (flags & BIT(i)) \
+ ++base[i]; \
+ if (flags == 0) \
+ ++base[MAX_NUM_EXCP - 1]; \
+ } while (0)
+#else
+#define IPA_STATS_INC_CNT(x) do { } while (0)
+#define IPA_STATS_DEC_CNT(x) do { } while (0)
+#define IPA_STATS_EXCP_CNT(flags, base) do { } while (0)
+#endif
+
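+/*
+ * Rule equation bits, as encoded in the en_rule field of the HW
+ * filtering/routing rule header.
+ */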
+#define IPA_TOS_EQ BIT(0)
+#define IPA_PROTOCOL_EQ BIT(1)
+#define IPA_OFFSET_MEQ32_0 BIT(2)
+#define IPA_OFFSET_MEQ32_1 BIT(3)
+#define IPA_IHL_OFFSET_RANGE16_0 BIT(4)
+#define IPA_IHL_OFFSET_RANGE16_1 BIT(5)
+#define IPA_IHL_OFFSET_EQ_16 BIT(6)
+#define IPA_IHL_OFFSET_EQ_32 BIT(7)
+#define IPA_IHL_OFFSET_MEQ32_0 BIT(8)
+#define IPA_OFFSET_MEQ128_0 BIT(9)
+#define IPA_OFFSET_MEQ128_1 BIT(10)
+#define IPA_TC_EQ BIT(11)
+#define IPA_FL_EQ BIT(12)
+#define IPA_IHL_OFFSET_MEQ32_1 BIT(13)
+#define IPA_METADATA_COMPARE BIT(14)
+#define IPA_IS_FRAG BIT(15)
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN4 4
+#define IPA_HDR_BIN_MAX 5
+
+#define IPA_HDR_PROC_CTX_BIN0 0
+#define IPA_HDR_PROC_CTX_BIN1 1
+#define IPA_HDR_PROC_CTX_BIN_MAX 2
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+/*
+ * Due to a ZLT issue with the USB 3.0 core, the IPA BAM threshold needs to
+ * be set to max packet size + 1. After setting the threshold, the USB core
+ * will not be notified on ZLTs.
+ */
+#define IPA_USB_EVENT_THRESHOLD 0x4001
+
+#define IPA_RX_POOL_CEIL 32
+#define IPA_RX_SKB_SIZE 1792
+
+#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr"
+#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
+#define IPA_INVALID_L4_PROTOCOL 0xFF
+
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
+ (reg |= ((val) << (shift)) & (mask))
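+/* e.g. IPA_SETFIELD(0x5, 4, 0xF0) == 0x50 */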
+
+#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
+ (((start_ofst) + 127) & ~127)
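+/* e.g. IPA_HW_TABLE_ALIGNMENT(0x81) == 0x100 */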
+#define IPA_RT_FLT_HW_RULE_BUF_SIZE (128)
+
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
+ (((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \
+ ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1))
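+/* e.g. IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(0x1001) == 0x1008 */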
+
+#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
+#define IPA_MEM_PART(x_) (ipa_ctx->ctrl->mem_partition.x_)
+
+#define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
+#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96
+#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
+#define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40
+
+struct ipa2_active_client_htable_entry {
+ struct hlist_node list;
+ char id_string[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];
+ int count;
+ enum ipa_active_client_log_type type;
+};
+
+struct ipa2_active_clients_log_ctx {
+ char *log_buffer[IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
+ int log_head;
+ int log_tail;
+ bool log_rdy;
+ struct hlist_head htable[IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
+};
+
+
+struct ipa_client_names {
+ enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS];
+ int length;
+};
+
+struct ipa_smmu_cb_ctx {
+ bool valid;
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+ struct iommu_domain *iommu;
+ unsigned long next_addr;
+ u32 va_start;
+ u32 va_size;
+ u32 va_end;
+};
+
+/**
+ * struct ipa_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ * @id: filtering entry id
+ */
+struct ipa_flt_entry {
+ struct list_head link;
+ struct ipa_flt_rule rule;
+ u32 cookie;
+ struct ipa_flt_tbl *tbl;
+ struct ipa_rt_tbl *rt_tbl;
+ u32 hw_len;
+ int id;
+};
+
+/**
+ * struct ipa_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ * @id: routing table id
+ */
+struct ipa_rt_tbl {
+ struct list_head link;
+ struct list_head head_rt_rule_list;
+ char name[IPA_RESOURCE_NAME_MAX];
+ u32 idx;
+ u32 rule_cnt;
+ u32 ref_cnt;
+ struct ipa_rt_tbl_set *set;
+ u32 cookie;
+ bool in_sys;
+ u32 sz;
+ struct ipa_mem_buffer curr_mem;
+ struct ipa_mem_buffer prev_mem;
+ int id;
+};
+
+/**
+ * struct ipa_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @type: l2 header type
+ * @is_partial: flag indicating if header table entry is partial
+ * @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
+ * true - hdr entry resides in DDR and is pointed to by a proc ctx
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
+ * else 0
+ * @proc_ctx: processing context header
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of this header entry
+ * @id: header entry id
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_hdr_entry {
+ struct list_head link;
+ u8 hdr[IPA_HDR_MAX_SIZE];
+ u32 hdr_len;
+ char name[IPA_RESOURCE_NAME_MAX];
+ enum ipa_hdr_l2_type type;
+ u8 is_partial;
+ bool is_hdr_proc_ctx;
+ dma_addr_t phys_base;
+ struct ipa_hdr_proc_ctx_entry *proc_ctx;
+ struct ipa_hdr_offset_entry *offset_entry;
+ u32 cookie;
+ u32 ref_cnt;
+ int id;
+ u8 is_eth2_ofst_valid;
+ u16 eth2_ofst;
+};
+
+/**
+ * struct ipa_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: current end offset of the table (next free byte)
+ */
+struct ipa_hdr_tbl {
+ struct list_head head_hdr_entry_list;
+ struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+ struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+ u32 hdr_cnt;
+ u32 end;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_offset_entry - IPA processing context header
+ * offset entry
+ * @link: entry's link in global processing context header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa_hdr_proc_ctx_offset_entry {
+ struct list_head link;
+ u32 offset;
+ u32 bin;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_add_hdr_seq -
+ * IPA processing context header - add header sequence
+ * @hdr_add: add header command
+ * @end: tlv end command (end.type must be 0)
+ */
+struct ipa_hdr_proc_ctx_add_hdr_seq {
+ struct ipa_hdr_proc_ctx_hdr_add hdr_add;
+ struct ipa_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_add_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @cmd: tlv processing command (cmd.type must be 3)
+ * @end: tlv end command (end.type must be 0)
+ */
+struct ipa_hdr_proc_ctx_add_hdr_cmd_seq {
+ struct ipa_hdr_proc_ctx_hdr_add hdr_add;
+ struct ipa_hdr_proc_ctx_tlv cmd;
+ struct ipa_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_entry - IPA processing context header table entry
+ * @link: entry's link in global header table entries list
+ * @type: processing context type
+ * @offset_entry: entry's offset
+ * @hdr: the header this processing context points to
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of this entry
+ * @id: processing context header entry id
+ */
+struct ipa_hdr_proc_ctx_entry {
+ struct list_head link;
+ enum ipa_hdr_proc_type type;
+ struct ipa_hdr_proc_ctx_offset_entry *offset_entry;
+ struct ipa_hdr_entry *hdr;
+ u32 cookie;
+ u32 ref_cnt;
+ int id;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_tbl - IPA processing context header table
+ * @head_proc_ctx_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @proc_ctx_cnt: number of processing context headers
+ * @end: current end offset of the table (next free byte)
+ * @start_offset: offset in words of processing context header table
+ */
+struct ipa_hdr_proc_ctx_tbl {
+ struct list_head head_proc_ctx_entry_list;
+ struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+ struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+ u32 proc_ctx_cnt;
+ u32 end;
+ u32 start_offset;
+};
+
+/**
+ * struct ipa_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter table
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ * @sticky_rear: flag indicating the rule at the rear of the table must
+ * stay there (used for the default exception rule)
+ */
+struct ipa_flt_tbl {
+ struct list_head head_flt_rule_list;
+ u32 rule_cnt;
+ bool in_sys;
+ u32 sz;
+ struct ipa_mem_buffer curr_mem;
+ struct ipa_mem_buffer prev_mem;
+ bool sticky_rear;
+};
+
+/**
+ * struct ipa_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table
+ * @proc_ctx: processing context table
+ * @hw_len: the length of the rule in HW format
+ * @id: routing rule entry id
+ */
+struct ipa_rt_entry {
+ struct list_head link;
+ struct ipa_rt_rule rule;
+ u32 cookie;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_hdr_entry *hdr;
+ struct ipa_hdr_proc_ctx_entry *proc_ctx;
+ u32 hw_len;
+ int id;
+};
+
+/**
+ * struct ipa_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa_rt_tbl_set {
+ struct list_head head_rt_tbl_list;
+ u32 tbl_cnt;
+};
+
+/**
+ * struct ipa_ep_cfg_status - status configuration in IPA end-point
+ * @status_en: Determines if end point supports Status Indications. SW should
+ * set this bit in order to enable Statuses. Output Pipe - send
+ * Status indications only if bit is set. Input Pipe - forward Status
+ * indication to STATUS_ENDP only if bit is set. Valid for Input
+ * and Output Pipes (IPA Consumer and Producer)
+ * @status_ep: Statuses generated for this endpoint will be forwarded to the
+ * specified Status End Point. Status endpoint needs to be
+ * configured with STATUS_EN=1 Valid only for Input Pipes (IPA
+ * Consumer)
+ */
+struct ipa_ep_cfg_status {
+ bool status_en;
+ u8 status_ep;
+};
+
+/**
+ * struct ipa_wlan_stats - Wlan stats for each wlan endpoint
+ * @rx_pkts_rcvd: Packets sent by wlan driver
+ * @rx_pkts_status_rcvd: Status packets received from ipa hw
+ * @rx_hd_processed: Data Descriptors processed by IPA Driver
+ * @rx_hd_reply: Data Descriptors recycled by wlan driver
+ * @rx_hd_rcvd: Data Descriptors sent by wlan driver
+ * @rx_pkt_leak: Count of packets that were not recycled
+ * @rx_dp_fail: Packets failed to transfer to IPA HW
+ * @tx_pkts_rcvd: SKB Buffers received from ipa hw
+ * @tx_pkts_sent: SKB Buffers sent to wlan driver
+ * @tx_pkts_dropped: Dropped packets count
+ */
+struct ipa_wlan_stats {
+ u32 rx_pkts_rcvd;
+ u32 rx_pkts_status_rcvd;
+ u32 rx_hd_processed;
+ u32 rx_hd_reply;
+ u32 rx_hd_rcvd;
+ u32 rx_pkt_leak;
+ u32 rx_dp_fail;
+ u32 tx_pkts_rcvd;
+ u32 tx_pkts_sent;
+ u32 tx_pkts_dropped;
+};
+
+/**
+ * struct ipa_wlan_comm_memb - Wlan comm members
+ * @wlan_spinlock: protects wlan comm buff list and its size
+ * @ipa_tx_mul_spinlock: protects tx dp mul transfer
+ * @wlan_comm_total_cnt: wlan common skb buffers allocated count
+ * @wlan_comm_free_cnt: wlan common skb buffer free count
+ * @total_tx_pkts_freed: Recycled Buffer count
+ * @wlan_comm_desc_list: wlan common skb buffer list
+ * @active_clnt_cnt: count of active wlan clients
+ */
+struct ipa_wlan_comm_memb {
+ spinlock_t wlan_spinlock;
+ spinlock_t ipa_tx_mul_spinlock;
+ u32 wlan_comm_total_cnt;
+ u32 wlan_comm_free_cnt;
+ u32 total_tx_pkts_freed;
+ struct list_head wlan_comm_desc_list;
+ atomic_t active_clnt_cnt;
+};
+
+struct ipa_status_stats {
+ struct ipa_hw_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
+ int curr;
+};
+
+enum ipa_wakelock_ref_client {
+ IPA_WAKELOCK_REF_CLIENT_TX = 0,
+ IPA_WAKELOCK_REF_CLIENT_LAN_RX = 1,
+ IPA_WAKELOCK_REF_CLIENT_WAN_RX = 2,
+ IPA_WAKELOCK_REF_CLIENT_WLAN_RX = 3,
+ IPA_WAKELOCK_REF_CLIENT_ODU_RX = 4,
+ IPA_WAKELOCK_REF_CLIENT_SPS = 5,
+ IPA_WAKELOCK_REF_CLIENT_MAX
+};
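+
+/*
+ * Each reference client above pairs with ipa_inc_acquire_wakelock() /
+ * ipa_dec_release_wakelock() calls (declared later in this header), e.g.
+ * (illustrative only):
+ *
+ *	ipa_inc_acquire_wakelock(IPA_WAKELOCK_REF_CLIENT_WAN_RX);
+ *	...
+ *	ipa_dec_release_wakelock(IPA_WAKELOCK_REF_CLIENT_WAN_RX);
+ */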
+
+/**
+ * struct ipa_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information which will be forwarded once the user is
+ * notified of new data availability
+ * @client_notify: user provided CB for EP events notification, the event is
+ * data received.
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ * by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @rx_replenish_threshold: Indicates the WM value which requires the RX
+ *                          descriptors replenish function to be called to
+ *                          prevent the RX pipe from running out of
+ *                          descriptors and causing HOLB.
+ * @disconnect_in_progress: Indicates client disconnect in progress.
+ * @qmi_request_sent: Indicates whether QMI request to enable clear data path
+ * request is sent or not.
+ * @napi_enabled: when true, IPA calls the client callback to start polling
+ */
+struct ipa_ep_context {
+ int valid;
+ enum ipa_client_type client;
+ struct sps_pipe *ep_hdl;
+ struct ipa_ep_cfg cfg;
+ struct ipa_ep_cfg_holb holb;
+ struct ipa_ep_cfg_status status;
+ u32 dst_pipe_index;
+ u32 rt_tbl_idx;
+ struct sps_connect connect;
+ void *priv;
+ void (*client_notify)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+ bool desc_fifo_in_pipe_mem;
+ bool data_fifo_in_pipe_mem;
+ u32 desc_fifo_pipe_mem_ofst;
+ u32 data_fifo_pipe_mem_ofst;
+ bool desc_fifo_client_allocated;
+ bool data_fifo_client_allocated;
+ atomic_t avail_fifo_desc;
+ u32 dflt_flt4_rule_hdl;
+ u32 dflt_flt6_rule_hdl;
+ bool skip_ep_cfg;
+ bool keep_ipa_awake;
+ struct ipa_wlan_stats wstats;
+ u32 uc_offload_state;
+ u32 rx_replenish_threshold;
+ bool disconnect_in_progress;
+ u32 qmi_request_sent;
+ enum ipa_wakelock_ref_client wakelock_client;
+ bool napi_enabled;
+ bool switch_to_intr;
+ int inactive_cycles;
+ u32 eot_in_poll_err;
+ bool ep_disabled;
+
+ /* sys MUST be the last element of this struct */
+ struct ipa_sys_context *sys;
+};
+
+enum ipa_sys_pipe_policy {
+ IPA_POLICY_INTR_MODE,
+ IPA_POLICY_NOINTR_MODE,
+ IPA_POLICY_INTR_POLL_MODE,
+};
+
+struct ipa_repl_ctx {
+ struct ipa_rx_pkt_wrapper **cache;
+ atomic_t head_idx;
+ atomic_t tail_idx;
+ u32 capacity;
+};
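+
+/*
+ * A minimal sketch (not part of the driver) of the ring arithmetic implied
+ * by the fields above, assuming one index is advanced by the producer and
+ * the other by the consumer, both wrapping at @capacity; the helper name
+ * is hypothetical:
+ *
+ *	static inline u32 ipa_repl_fill_level(struct ipa_repl_ctx *ctx)
+ *	{
+ *		u32 head = atomic_read(&ctx->head_idx);
+ *		u32 tail = atomic_read(&ctx->tail_idx);
+ *
+ *		return (tail - head + ctx->capacity) % ctx->capacity;
+ *	}
+ */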
+
+/**
+ * struct ipa_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ *
+ * IPA context specific to the system-BAM pipes, a.k.a. LAN IN/OUT and WAN
+ */
+struct ipa_sys_context {
+ u32 len;
+ struct sps_register_event event;
+ atomic_t curr_polling_state;
+ struct delayed_work switch_to_intr_work;
+ enum ipa_sys_pipe_policy policy;
+ int (*pyld_hdlr)(struct sk_buff *skb, struct ipa_sys_context *sys);
+ struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
+ void (*free_skb)(struct sk_buff *skb);
+ u32 rx_buff_sz;
+ u32 rx_pool_sz;
+ struct sk_buff *prev_skb;
+ unsigned int len_rem;
+ unsigned int len_pad;
+ unsigned int len_partial;
+ bool drop_packet;
+ struct work_struct work;
+ void (*sps_callback)(struct sps_event_notify *notify);
+ enum sps_option sps_option;
+ struct delayed_work replenish_rx_work;
+ struct work_struct repl_work;
+ void (*repl_hdlr)(struct ipa_sys_context *sys);
+ struct ipa_repl_ctx repl;
+ unsigned int repl_trig_cnt;
+ unsigned int repl_trig_thresh;
+
+ /* ordering is important - mutable fields go above */
+ struct ipa_ep_context *ep;
+ struct list_head head_desc_list;
+ struct list_head rcycl_list;
+ spinlock_t spinlock;
+ struct workqueue_struct *wq;
+ struct workqueue_struct *repl_wq;
+ struct ipa_status_stats *status_stat;
+ /* ordering is important - other immutable fields go below */
+};
+
+/**
+ * enum ipa_desc_type - IPA descriptor type
+ *
+ * IPA descriptor types; IPA supports DD and ICD but no CD
+ */
+enum ipa_desc_type {
+ IPA_DATA_DESC,
+ IPA_DATA_DESC_SKB,
+ IPA_DATA_DESC_SKB_PAGED,
+ IPA_IMM_CMD_DESC
+};
+
+/**
+ * struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: specify if this packet is for the skb or immediate command
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ * >1 and <0xFFFF for first of a "multiple" transfer,
+ * 0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ * @no_unmap_dma: when true, the buffer will not be DMA unmapped
+ *
+ * This struct can wrap both data packet and immediate command packet.
+ */
+struct ipa_tx_pkt_wrapper {
+ enum ipa_desc_type type;
+ struct ipa_mem_buffer mem;
+ struct work_struct work;
+ struct list_head link;
+ void (*callback)(void *user1, int user2);
+ void *user1;
+ int user2;
+ struct ipa_sys_context *sys;
+ struct ipa_mem_buffer mult;
+ u32 cnt;
+ void *bounce;
+ bool no_unmap_dma;
+};
+
+/**
+ * struct ipa_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb or kmalloc'ed immediate command
+ * parameters/plain old data
+ * @frag: points to paged fragment
+ * @dma_address: dma mapped address of pyld
+ * @dma_address_valid: valid field for dma_address
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa_desc {
+ enum ipa_desc_type type;
+ void *pyld;
+ skb_frag_t *frag;
+ dma_addr_t dma_address;
+ bool dma_address_valid;
+ u16 len;
+ u16 opcode;
+ void (*callback)(void *user1, int user2);
+ void *user1;
+ int user2;
+ struct completion xfer_done;
+};
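+
+/*
+ * Illustrative setup of a descriptor carrying an immediate command (a
+ * sketch, not driver code; cmd_pyld and cmd_opcode stand in for a real
+ * HW-defined command payload and opcode):
+ *
+ *	struct ipa_desc desc;
+ *
+ *	memset(&desc, 0, sizeof(desc));
+ *	desc.type = IPA_IMM_CMD_DESC;
+ *	desc.pyld = cmd_pyld;
+ *	desc.len = sizeof(*cmd_pyld);
+ *	desc.opcode = cmd_opcode;
+ *	desc.callback = NULL;
+ *
+ * Such a descriptor would then be handed to ipa_send_cmd() or
+ * ipa_send_one(), declared later in this header.
+ */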
+
+/**
+ * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @link: linked to the Rx packets on that pipe
+ * @data: skb and its DMA address for this Rx packet
+ * @len: how many bytes are copied into skb's flat buffer
+ * @work: work struct for current Rx packet
+ * @sys: corresponding IPA sys context
+ */
+struct ipa_rx_pkt_wrapper {
+ struct list_head link;
+ struct ipa_rx_data data;
+ u32 len;
+ struct work_struct work;
+ struct ipa_sys_context *sys;
+};
+
+/**
+ * struct ipa_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the device structure
+ * @cdev: cdev of the device
+ * @dev_num: device number (dev_t)
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ * @nat_base_address: nat table virtual address
+ * @ipv4_rules_addr: base nat table address
+ * @ipv4_expansion_rules_addr: expansion table address
+ * @index_table_addr: index table address
+ * @index_table_expansion_addr: index expansion table address
+ * @size_base_tables: base table size
+ * @size_expansion_tables: expansion table size
+ * @public_ip_addr: ip address of nat table
+ */
+struct ipa_nat_mem {
+ struct class *class;
+ struct device *dev;
+ struct cdev cdev;
+ dev_t dev_num;
+ void *vaddr;
+ dma_addr_t dma_handle;
+ size_t size;
+ bool is_mapped;
+ bool is_sys_mem;
+ bool is_dev_init;
+ bool is_dev;
+ struct mutex lock;
+ void *nat_base_address;
+ char *ipv4_rules_addr;
+ char *ipv4_expansion_rules_addr;
+ char *index_table_addr;
+ char *index_table_expansion_addr;
+ u32 size_base_tables;
+ u32 size_expansion_tables;
+ u32 public_ip_addr;
+ void *tmp_vaddr;
+ dma_addr_t tmp_dma_handle;
+ bool is_tmp_mem;
+};
+
+/**
+ * enum ipa_hw_mode - IPA hardware mode
+ * @IPA_HW_MODE_NORMAL: Regular IPA hardware
+ * @IPA_HW_MODE_VIRTUAL: IPA hardware supporting virtual memory allocation
+ * @IPA_HW_MODE_PCIE: IPA hardware supporting memory allocation over a PCIe
+ * bridge
+ */
+enum ipa_hw_mode {
+ IPA_HW_MODE_NORMAL = 0,
+ IPA_HW_MODE_VIRTUAL = 1,
+ IPA_HW_MODE_PCIE = 2
+};
+
+enum ipa_config_this_ep {
+ IPA_CONFIGURE_THIS_EP,
+ IPA_DO_NOT_CONFIGURE_THIS_EP,
+};
+
+struct ipa_stats {
+ u32 tx_sw_pkts;
+ u32 tx_hw_pkts;
+ u32 rx_pkts;
+ u32 rx_excp_pkts[MAX_NUM_EXCP];
+ u32 rx_repl_repost;
+ u32 tx_pkts_compl;
+ u32 rx_q_len;
+ u32 msg_w[IPA_EVENT_MAX_NUM];
+ u32 msg_r[IPA_EVENT_MAX_NUM];
+ u32 stat_compl;
+ u32 aggr_close;
+ u32 wan_aggr_close;
+ u32 wan_rx_empty;
+ u32 wan_repl_rx_empty;
+ u32 lan_rx_empty;
+ u32 lan_repl_rx_empty;
+ u32 flow_enable;
+ u32 flow_disable;
+ u32 tx_non_linear;
+};
+
+struct ipa_active_clients {
+ struct mutex mutex;
+ spinlock_t spinlock;
+ bool mutex_locked;
+ int cnt;
+};
+
+struct ipa_wakelock_ref_cnt {
+ spinlock_t spinlock;
+ u32 cnt;
+};
+
+struct ipa_tag_completion {
+ struct completion comp;
+ atomic_t cnt;
+};
+
+struct ipa_controller;
+
+/**
+ * struct ipa_uc_hdlrs - IPA uC callback functions
+ * @ipa_uc_loaded_hdlr: Function handler when uC is loaded
+ * @ipa_uc_event_hdlr: Event handler function
+ * @ipa_uc_response_hdlr: Response handler function
+ * @ipa_uc_event_log_info_hdlr: Log event handler function
+ */
+struct ipa_uc_hdlrs {
+ void (*ipa_uc_loaded_hdlr)(void);
+
+ void (*ipa_uc_event_hdlr)
+ (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio);
+ int (*ipa_uc_response_hdlr)
+ (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio,
+ u32 *uc_status);
+ void (*ipa_uc_event_log_info_hdlr)
+ (struct IpaHwEventLogInfoData_t *uc_event_top_mmio);
+};
+
+/**
+ * enum ipa_hw_flags - flags which defines the behavior of HW
+ *
+ * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert
+ * failure.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported
+ * in the event ring only. No event to CPU.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event
+ * IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST
+ * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by
+ * QMB (avoid memcpy)
+ * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in
+ * IN Channel
+ * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is
+ * entering a mode where it expects a doorbell to be rung for OUT Channel
+ * @IPA_HW_FLAG_NO_START_OOB_TIMER: If set, do not start the OOB timer
+ */
+enum ipa_hw_flags {
+ IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE = 0x01,
+ IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR = 0x02,
+ IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP = 0x04,
+ IPA_HW_FLAG_WORK_OVER_DDR = 0x08,
+ IPA_HW_FLAG_NO_REPORT_OOB = 0x10,
+ IPA_HW_FLAG_NO_REPORT_DB_MODE = 0x20,
+ IPA_HW_FLAG_NO_START_OOB_TIMER = 0x40
+};
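+
+/*
+ * The flag values above are single bits and may be OR-ed together before
+ * being passed to ipa_uc_update_hw_flags(), declared later in this header.
+ * An illustrative (not prescriptive) combination:
+ *
+ *	ipa_uc_update_hw_flags(IPA_HW_FLAG_WORK_OVER_DDR |
+ *			       IPA_HW_FLAG_NO_REPORT_OOB);
+ */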
+
+/**
+ * struct ipa_uc_ctx - IPA uC context
+ * @uc_inited: Indicates if uC interface has been initialized
+ * @uc_loaded: Indicates if uC has loaded
+ * @uc_failed: Indicates if uC has failed / returned an error
+ * @uc_lock: uC interface lock to allow only one uC interaction at a time
+ * @uc_completion: Completion mechanism to wait for uC commands
+ * @uc_sram_mmio: Pointer to uC mapped memory
+ * @pending_cmd: The last command sent waiting to be ACKed
+ * @uc_status: The last status provided by the uC
+ * @uc_zip_error: uC has notified the APPS upon a ZIP engine error
+ * @uc_error_type: error type from uC error event
+ */
+struct ipa_uc_ctx {
+ bool uc_inited;
+ bool uc_loaded;
+ bool uc_failed;
+ struct mutex uc_lock;
+ struct completion uc_completion;
+ struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio;
+ struct IpaHwEventLogInfoData_t *uc_event_top_mmio;
+ u32 uc_event_top_ofst;
+ u32 pending_cmd;
+ u32 uc_status;
+ bool uc_zip_error;
+ u32 uc_error_type;
+};
+
+/**
+ * struct ipa_uc_wdi_ctx - IPA uC WDI context
+ * @wdi_uc_stats_ofst: WDI stats offset within uC shared memory
+ * @wdi_uc_stats_mmio: pointer to mapped WDI stats
+ * @priv: client private data passed back in the ready callback
+ * @uc_ready_cb: callback invoked when the uC becomes ready
+ */
+struct ipa_uc_wdi_ctx {
+ /* WDI specific fields */
+ u32 wdi_uc_stats_ofst;
+ struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * struct ipa_sps_pm - SPS power management related members
+ * @dec_clients: true if need to decrease active clients count
+ * @eot_activity: represent EOT interrupt activity to determine to reset
+ * the inactivity timer
+ * @sps_pm_lock: Lock to protect the sps_pm functionality.
+ */
+struct ipa_sps_pm {
+ atomic_t dec_clients;
+ atomic_t eot_activity;
+ struct mutex sps_pm_lock;
+};
+
+/**
+ * struct ipacm_client_info - the client-info indicated from IPACM
+ * @ipacm_client_enum: the enum to indicate tether-client
+ * @ipacm_client_uplink: the bool to indicate pipe for uplink
+ */
+struct ipacm_client_info {
+ enum ipacm_client_enum client_enum;
+ bool uplink;
+};
+
+/**
+ * struct ipa_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the device structure
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @skip_ep_cfg_shadow: state to update filter table correctly across
+ * power-save
+ * @resume_on_connect: resume ep on ipa_connect
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @glob_flt_tbl: global filter table
+ * @hdr_tbl: IPA header table
+ * @hdr_proc_ctx_tbl: IPA processing context table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @hdr_proc_ctx_cache: processing context cache
+ * @hdr_proc_ctx_offset_cache: processing context offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa_sys_context
+ * @smem_sz: shared memory size available for SW use starting
+ * from non-restricted bytes
+ * @smem_restricted_bytes: the bytes that SW should not use in the shared mem
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system
+ * @hdr_mem: header memory
+ * @hdr_proc_ctx_mem: processing context memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @empty_rt_tbl_mem: empty routing tables memory
+ * @power_mgmt_wq: workqueue for power management
+ * @sps_power_mgmt_wq: workqueue SPS related power management
+ * @tag_process_before_gating: indicates whether to start tag process before
+ * gating IPA clocks
+ * @sps_pm: sps power management related information
+ * @disconnect_lock: protects LAN_CONS packet receive notification CB
+ * @pipe_mem_pool: pipe memory pool
+ * @dma_pool: special purpose DMA pool
+ * @ipa_active_clients: structure for reference counting connected IPA clients
+ * @ipa_hw_type: IPA HW type (e.g. IPA 1.0, IPA 1.1 etc.)
+ * @ipa_hw_mode: IPA HW operating mode (e.g. Normal, Virtual or over PCIe)
+ * @use_ipa_teth_bridge: use tethering bridge driver
+ * @ipa_bam_remote_mode: ipa bam is in remote mode
+ * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
+ * @ipa_bus_hdl: msm driver handle for the data path bus
+ * @ctrl: holds the core specific operations based on
+ * core version (vtable like)
+ * @enable_clock_scaling: is clock scaling enabled?
+ * @curr_ipa_clk_rate: ipa_clk current rate
+ * @wcstats: wlan common buffer stats
+ * @uc_ctx: uC interface context
+ * @uc_wdi_ctx: WDI specific fields for uC interface
+ * @ipa_num_pipes: The number of pipes used by IPA HW
+ * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided
+ * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA
+ * @w_lock: Indicates the wakeup source.
+ * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired
+ *
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa_context {
+ struct class *class;
+ dev_t dev_num;
+ struct device *dev;
+ struct cdev cdev;
+ unsigned long bam_handle;
+ struct ipa_ep_context ep[IPA_MAX_NUM_PIPES];
+ bool skip_ep_cfg_shadow[IPA_MAX_NUM_PIPES];
+ bool resume_on_connect[IPA_CLIENT_MAX];
+ struct ipa_flt_tbl flt_tbl[IPA_MAX_NUM_PIPES][IPA_IP_MAX];
+ void __iomem *mmio;
+ u32 ipa_wrapper_base;
+ u32 ipa_wrapper_size;
+ struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
+ struct ipa_hdr_tbl hdr_tbl;
+ struct ipa_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
+ struct ipa_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+ struct ipa_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+ struct kmem_cache *flt_rule_cache;
+ struct kmem_cache *rt_rule_cache;
+ struct kmem_cache *hdr_cache;
+ struct kmem_cache *hdr_offset_cache;
+ struct kmem_cache *hdr_proc_ctx_cache;
+ struct kmem_cache *hdr_proc_ctx_offset_cache;
+ struct kmem_cache *rt_tbl_cache;
+ struct kmem_cache *tx_pkt_wrapper_cache;
+ struct kmem_cache *rx_pkt_wrapper_cache;
+ unsigned long rt_idx_bitmap[IPA_IP_MAX];
+ struct mutex lock;
+ u16 smem_sz;
+ u16 smem_restricted_bytes;
+ u16 smem_reqd_sz;
+ struct ipa_nat_mem nat_mem;
+ u32 excp_hdr_hdl;
+ u32 dflt_v4_rt_rule_hdl;
+ u32 dflt_v6_rt_rule_hdl;
+ uint aggregation_type;
+ uint aggregation_byte_limit;
+ uint aggregation_time_limit;
+ bool hdr_tbl_lcl;
+ bool hdr_proc_ctx_tbl_lcl;
+ struct ipa_mem_buffer hdr_mem;
+ struct ipa_mem_buffer hdr_proc_ctx_mem;
+ bool ip4_rt_tbl_lcl;
+ bool ip6_rt_tbl_lcl;
+ bool ip4_flt_tbl_lcl;
+ bool ip6_flt_tbl_lcl;
+ struct ipa_mem_buffer empty_rt_tbl_mem;
+ struct gen_pool *pipe_mem_pool;
+ struct dma_pool *dma_pool;
+ struct ipa_active_clients ipa_active_clients;
+ struct ipa2_active_clients_log_ctx ipa2_active_clients_logging;
+ struct workqueue_struct *power_mgmt_wq;
+ struct workqueue_struct *sps_power_mgmt_wq;
+ bool tag_process_before_gating;
+ struct ipa_sps_pm sps_pm;
+ u32 clnt_hdl_cmd;
+ u32 clnt_hdl_data_in;
+ u32 clnt_hdl_data_out;
+ spinlock_t disconnect_lock;
+ u8 a5_pipe_index;
+ struct list_head intf_list;
+ struct list_head msg_list;
+ struct list_head pull_msg_list;
+ struct mutex msg_lock;
+ wait_queue_head_t msg_waitq;
+ enum ipa_hw_type ipa_hw_type;
+ enum ipa_hw_mode ipa_hw_mode;
+ bool use_ipa_teth_bridge;
+ bool ipa_bam_remote_mode;
+ bool modem_cfg_emb_pipe_flt;
+ /* featurize if memory footprint becomes a concern */
+ struct ipa_stats stats;
+ void *smem_pipe_mem;
+ u32 ipa_bus_hdl;
+ struct ipa_controller *ctrl;
+ struct idr ipa_idr;
+ struct device *pdev;
+ struct device *uc_pdev;
+ spinlock_t idr_lock;
+ u32 enable_clock_scaling;
+ u32 curr_ipa_clk_rate;
+ bool q6_proxy_clk_vote_valid;
+ u32 ipa_num_pipes;
+
+ struct ipa_wlan_comm_memb wc_memb;
+
+ struct ipa_uc_ctx uc_ctx;
+
+ struct ipa_uc_wdi_ctx uc_wdi_ctx;
+ struct ipa_uc_ntn_ctx uc_ntn_ctx;
+ u32 wan_rx_ring_size;
+ u32 lan_rx_ring_size;
+ bool skip_uc_pipe_reset;
+ bool smmu_present;
+ bool smmu_s1_bypass;
+ unsigned long peer_bam_iova;
+ phys_addr_t peer_bam_pa;
+ u32 peer_bam_map_size;
+ unsigned long peer_bam_dev;
+ u32 peer_bam_map_cnt;
+ u32 wdi_map_cnt;
+ bool use_dma_zone;
+ struct wakeup_source w_lock;
+ struct ipa_wakelock_ref_cnt wakelock_ref_cnt;
+
+ /* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */
+ bool ipa_client_apps_wan_cons_agg_gro;
+ /* M-release support to know client pipes */
+ struct ipacm_client_info ipacm_client[IPA_MAX_NUM_PIPES];
+ bool tethered_flow_control;
+ u32 ipa_rx_min_timeout_usec;
+ u32 ipa_rx_max_timeout_usec;
+ u32 ipa_polling_iteration;
+};
+
+/**
+ * struct ipa_route - IPA route
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ * @route_frag_def_pipe: Default pipe to route fragmented exception
+ * packets and frag new rule statuses, if source pipe does not have
+ * a notification status pipe defined.
+ */
+struct ipa_route {
+ u32 route_dis;
+ u32 route_def_pipe;
+ u32 route_def_hdr_table;
+ u32 route_def_hdr_ofst;
+ u8 route_frag_def_pipe;
+};
+
+/**
+ * enum ipa_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa_pipe_mem_type {
+ IPA_SPS_PIPE_MEM = 0,
+ IPA_PRIVATE_MEM = 1,
+ IPA_SYSTEM_MEM = 2,
+};
+
+struct ipa_plat_drv_res {
+ bool use_ipa_teth_bridge;
+ u32 ipa_mem_base;
+ u32 ipa_mem_size;
+ u32 bam_mem_base;
+ u32 bam_mem_size;
+ u32 ipa_irq;
+ u32 bam_irq;
+ u32 ipa_pipe_mem_start_ofst;
+ u32 ipa_pipe_mem_size;
+ enum ipa_hw_type ipa_hw_type;
+ enum ipa_hw_mode ipa_hw_mode;
+ u32 ee;
+ bool ipa_bam_remote_mode;
+ bool modem_cfg_emb_pipe_flt;
+ u32 wan_rx_ring_size;
+ u32 lan_rx_ring_size;
+ bool skip_uc_pipe_reset;
+ bool use_dma_zone;
+ bool tethered_flow_control;
+ u32 ipa_rx_polling_sleep_msec;
+ u32 ipa_polling_iteration;
+};
+
+struct ipa_mem_partition {
+ u16 ofst_start;
+ u16 nat_ofst;
+ u16 nat_size;
+ u16 v4_flt_ofst;
+ u16 v4_flt_size;
+ u16 v4_flt_size_ddr;
+ u16 v6_flt_ofst;
+ u16 v6_flt_size;
+ u16 v6_flt_size_ddr;
+ u16 v4_rt_ofst;
+ u16 v4_num_index;
+ u16 v4_modem_rt_index_lo;
+ u16 v4_modem_rt_index_hi;
+ u16 v4_apps_rt_index_lo;
+ u16 v4_apps_rt_index_hi;
+ u16 v4_rt_size;
+ u16 v4_rt_size_ddr;
+ u16 v6_rt_ofst;
+ u16 v6_num_index;
+ u16 v6_modem_rt_index_lo;
+ u16 v6_modem_rt_index_hi;
+ u16 v6_apps_rt_index_lo;
+ u16 v6_apps_rt_index_hi;
+ u16 v6_rt_size;
+ u16 v6_rt_size_ddr;
+ u16 modem_hdr_ofst;
+ u16 modem_hdr_size;
+ u16 apps_hdr_ofst;
+ u16 apps_hdr_size;
+ u16 apps_hdr_size_ddr;
+ u16 modem_hdr_proc_ctx_ofst;
+ u16 modem_hdr_proc_ctx_size;
+ u16 apps_hdr_proc_ctx_ofst;
+ u16 apps_hdr_proc_ctx_size;
+ u16 apps_hdr_proc_ctx_size_ddr;
+ u16 modem_comp_decomp_ofst;
+ u16 modem_comp_decomp_size;
+ u16 modem_ofst;
+ u16 modem_size;
+ u16 apps_v4_flt_ofst;
+ u16 apps_v4_flt_size;
+ u16 apps_v6_flt_ofst;
+ u16 apps_v6_flt_size;
+ u16 uc_info_ofst;
+ u16 uc_info_size;
+ u16 end_ofst;
+ u16 apps_v4_rt_ofst;
+ u16 apps_v4_rt_size;
+ u16 apps_v6_rt_ofst;
+ u16 apps_v6_rt_size;
+};
+
+struct ipa_controller {
+ struct ipa_mem_partition mem_partition;
+ u32 ipa_clk_rate_turbo;
+ u32 ipa_clk_rate_nominal;
+ u32 ipa_clk_rate_svs;
+ u32 clock_scaling_bw_threshold_turbo;
+ u32 clock_scaling_bw_threshold_nominal;
+ u32 ipa_reg_base_ofst;
+ u32 max_holb_tmr_val;
+ void (*ipa_sram_read_settings)(void);
+ int (*ipa_init_sram)(void);
+ int (*ipa_init_hdr)(void);
+ int (*ipa_init_rt4)(void);
+ int (*ipa_init_rt6)(void);
+ int (*ipa_init_flt4)(void);
+ int (*ipa_init_flt6)(void);
+ void (*ipa_cfg_ep_hdr)(u32 pipe_number,
+ const struct ipa_ep_cfg_hdr *ipa_ep_hdr_cfg);
+ int (*ipa_cfg_ep_hdr_ext)(u32 pipe_number,
+ const struct ipa_ep_cfg_hdr_ext *ipa_ep_hdr_ext_cfg);
+ void (*ipa_cfg_ep_aggr)(u32 pipe_number,
+ const struct ipa_ep_cfg_aggr *ipa_ep_agrr_cfg);
+ int (*ipa_cfg_ep_deaggr)(u32 pipe_index,
+ const struct ipa_ep_cfg_deaggr *ep_deaggr);
+ void (*ipa_cfg_ep_nat)(u32 pipe_number,
+ const struct ipa_ep_cfg_nat *ipa_ep_nat_cfg);
+ void (*ipa_cfg_ep_mode)(u32 pipe_number, u32 dst_pipe_number,
+ const struct ipa_ep_cfg_mode *ep_mode);
+ void (*ipa_cfg_ep_route)(u32 pipe_index, u32 rt_tbl_index);
+ void (*ipa_cfg_ep_holb)(u32 pipe_index,
+ const struct ipa_ep_cfg_holb *ep_holb);
+ void (*ipa_cfg_route)(struct ipa_route *route);
+ int (*ipa_read_gen_reg)(char *buff, int max_len);
+ int (*ipa_read_ep_reg)(char *buff, int max_len, int pipe);
+ void (*ipa_write_dbg_cnt)(int option);
+ int (*ipa_read_dbg_cnt)(char *buf, int max_len);
+ void (*ipa_cfg_ep_status)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_status *ep_status);
+ int (*ipa_commit_flt)(enum ipa_ip_type ip);
+ int (*ipa_commit_rt)(enum ipa_ip_type ip);
+ int (*ipa_generate_rt_hw_rule)(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf);
+ int (*ipa_commit_hdr)(void);
+ void (*ipa_cfg_ep_cfg)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_cfg *cfg);
+ void (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask *metadata_mask);
+ void (*ipa_enable_clks)(void);
+ void (*ipa_disable_clks)(void);
+ struct msm_bus_scale_pdata *msm_bus_data_ptr;
+
+ void (*ipa_cfg_ep_metadata)(u32 pipe_number,
+ const struct ipa_ep_cfg_metadata *);
+};
+
+extern struct ipa_context *ipa_ctx;
+
+/* public APIs */
+/*
+ * Connect / Disconnect
+ */
+int ipa2_connect(const struct ipa_connect_params *in,
+ struct ipa_sps_params *sps, u32 *clnt_hdl);
+int ipa2_disconnect(u32 clnt_hdl);
+
+/*
+ * Resume / Suspend
+ */
+int ipa2_reset_endpoint(u32 clnt_hdl);
+
+/*
+ * Remove ep delay
+ */
+int ipa2_clear_endpoint_delay(u32 clnt_hdl);
+
+/*
+ * Disable ep
+ */
+int ipa2_disable_endpoint(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa2_cfg_ep_deaggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
+
+int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client,
+ const struct ipa_ep_cfg_holb *ipa_ep_cfg);
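+
+/*
+ * Illustrative HOLB configuration (a sketch; the timer value is a
+ * placeholder and the client endpoint is arbitrary; field names per
+ * struct ipa_ep_cfg_holb):
+ *
+ *	struct ipa_ep_cfg_holb holb = {
+ *		.en = 1,
+ *		.tmr_val = 0,
+ *	};
+ *
+ *	ipa2_cfg_ep_holb_by_client(IPA_CLIENT_USB_CONS, &holb);
+ */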
+
+int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+/*
+ * Header removal / addition
+ */
+int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa2_commit_hdr(void);
+
+int ipa2_reset_hdr(void);
+
+int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa2_put_hdr(u32 hdr_hdl);
+
+int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Header Processing Context
+ */
+int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+/*
+ * Routing
+ */
+int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa2_commit_rt(enum ipa_ip_type ip);
+
+int ipa2_reset_rt(enum ipa_ip_type ip);
+
+int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa2_put_rt_tbl(u32 rt_tbl_hdl);
+
+int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
+
+int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
+
+/*
+ * Filtering
+ */
+int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
+
+int ipa2_commit_flt(enum ipa_ip_type ip);
+
+int ipa2_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Messaging
+ */
+int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback);
+int ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
+int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta);
+
+/*
+ * Interface
+ */
+int ipa2_register_intf(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx);
+int ipa2_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext);
+int ipa2_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+int ipa2_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa2_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa2_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * Data path
+ */
+int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata);
+
+/*
+ * To transfer multiple data packets
+ * While passing the data descriptor list, the anchor node
+ * should be of type struct ipa_tx_data_desc, not list_head
+ */
+int ipa2_tx_dp_mul(enum ipa_client_type dst,
+ struct ipa_tx_data_desc *data_desc);
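+
+/*
+ * Illustrative call sequence (a sketch; desc1 and desc2 are hypothetical,
+ * pre-filled struct ipa_tx_data_desc nodes):
+ *
+ *	struct ipa_tx_data_desc head;
+ *
+ *	INIT_LIST_HEAD(&head.link);
+ *	list_add_tail(&desc1->link, &head.link);
+ *	list_add_tail(&desc2->link, &head.link);
+ *	ipa2_tx_dp_mul(IPA_CLIENT_WLAN1_CONS, &head);
+ */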
+
+void ipa2_free_skb(struct ipa_rx_data *);
+
+/*
+ * System pipes
+ */
+int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa2_teardown_sys_pipe(u32 clnt_hdl);
+
+int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
+ unsigned long *ipa_bam_hdl,
+ u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
+
+int ipa2_sys_teardown(u32 clnt_hdl);
+
+int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+ unsigned long gsi_ev_hdl);
+
+int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out);
+int ipa2_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa2_enable_wdi_pipe(u32 clnt_hdl);
+int ipa2_disable_wdi_pipe(u32 clnt_hdl);
+int ipa2_resume_wdi_pipe(u32 clnt_hdl);
+int ipa2_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+u16 ipa2_get_smem_restr_bytes(void);
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+/*
+ * To retrieve doorbell physical address of
+ * wlan pipes
+ */
+int ipa2_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * To check uC readiness and, only if the uC is not yet ready,
+ * register a uC ready callback
+ */
+int ipa2_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa2_uc_dereg_rdyCB(void);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int ipa2_teth_bridge_init(struct teth_bridge_init_params *params);
+
+int ipa2_teth_bridge_disconnect(enum ipa_client_type client);
+
+int ipa2_teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
+/*
+ * Tethering client info
+ */
+void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink);
+
+enum ipacm_client_enum ipa2_get_client(int pipe_idx);
+
+bool ipa2_get_client_uplink(int pipe_idx);
+
+/*
+ * IPADMA
+ */
+int ipa2_dma_init(void);
+
+int ipa2_dma_enable(void);
+
+int ipa2_dma_disable(void);
+
+int ipa2_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+int ipa2_dma_async_memcpy(u64 dest, u64 src, int len,
+ void (*user_cb)(void *user1), void *user_param);
+
+int ipa2_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+
+void ipa2_dma_destroy(void);
+
+/*
+ * MHI APIs for IPA MHI client driver
+ */
+int ipa2_init_mhi(struct ipa_mhi_init_params *params);
+
+int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params);
+
+int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl);
+
+int ipa2_disconnect_mhi_pipe(u32 clnt_hdl);
+
+bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client);
+
+int ipa2_disable_sps_pipe(enum ipa_client_type client);
+
+int ipa2_mhi_reset_channel_internal(enum ipa_client_type client);
+
+int ipa2_mhi_start_channel_internal(enum ipa_client_type client);
+
+int ipa2_mhi_suspend_ul_channels(void);
+
+int ipa2_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index);
+
+/*
+ * mux id
+ */
+int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
+
+/*
+ * interrupts
+ */
+int ipa2_add_interrupt_handler(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data);
+
+int ipa2_remove_interrupt_handler(enum ipa_irq_type interrupt);
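+
+/*
+ * Illustrative registration of a deferred TX-suspend handler (a sketch;
+ * my_suspend_hdlr is hypothetical):
+ *
+ *	static void my_suspend_hdlr(enum ipa_irq_type interrupt,
+ *				    void *private_data, void *interrupt_data)
+ *	{
+ *		struct ipa_tx_suspend_irq_data *d = interrupt_data;
+ *
+ *		IPADBG("suspended endpoints bitmap: 0x%x\n", d->endpoints);
+ *	}
+ *
+ *	ipa2_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, my_suspend_hdlr,
+ *				   true, NULL);
+ */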
+
+/*
+ * Miscellaneous
+ */
+void ipa2_bam_reg_dump(void);
+
+int ipa2_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa2_is_ready(void);
+
+void ipa2_proxy_clk_vote(void);
+void ipa2_proxy_clk_unvote(void);
+
+bool ipa2_is_client_handle_valid(u32 clnt_hdl);
+
+enum ipa_client_type ipa2_get_client_mapping(int pipe_idx);
+
+enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx);
+
+bool ipa2_get_modem_cfg_emb_pipe_flt(void);
+
+/* internal functions */
+
+int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+ struct ipa_api_controller *api_ctrl);
+
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
+ bool in_atomic);
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
+ bool in_atomic);
+int ipa2_get_ep_mapping(enum ipa_client_type client);
+
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ u8 **buf,
+ u16 *en_rule);
+int ipa_init_hw(void);
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+int ipa_set_single_ndp_per_mbim(bool);
+int ipa_set_hw_timer_fix_for_mbim_aggr(bool);
+void ipa_debugfs_init(void);
+void ipa_debugfs_remove(void);
+
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+
+void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time);
+
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+ ipa_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
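+
+/*
+ * Example: IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size) dumps the
+ * buffer when IPA_DEBUG is defined and compiles to a no-op otherwise.
+ */
+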
+int ipa_controller_static_bind(struct ipa_controller *controller,
+ enum ipa_hw_type ipa_hw_type);
+int ipa_cfg_route(struct ipa_route *route);
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr);
+int ipa_cfg_filter(u32 disable);
+int ipa_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa_pipe_mem_free(u32 ofst, u32 size);
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa_context *ipa_get_ctx(void);
+void ipa_enable_clks(void);
+void ipa_disable_clks(void);
+void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+ *id);
+void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+ bool int_ctx);
+void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+ bool int_ctx);
+int ipa2_active_clients_log_print_buffer(char *buf, int size);
+int ipa2_active_clients_log_print_table(char *buf, int size);
+void ipa2_active_clients_log_clear(void);
+int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
+int __ipa_del_rt_rule(u32 rule_hdl);
+int __ipa_del_hdr(u32 hdr_hdl);
+int __ipa_release_hdr(u32 hdr_hdl);
+int __ipa_release_hdr_proc_ctx(u32 proc_ctx_hdl);
+int _ipa_read_gen_reg_v1_1(char *buff, int max_len);
+int _ipa_read_gen_reg_v2_0(char *buff, int max_len);
+int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe);
+int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe);
+void _ipa_write_dbg_cnt_v1_1(int option);
+void _ipa_write_dbg_cnt_v2_0(int option);
+int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len);
+int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len);
+void _ipa_enable_clks_v1_1(void);
+void _ipa_enable_clks_v2_0(void);
+void _ipa_disable_clks_v1_1(void);
+void _ipa_disable_clks_v2_0(void);
+
+static inline u32 ipa_read_reg(void *base, u32 offset)
+{
+ return ioread32(base + offset);
+}
+
+static inline u32 ipa_read_reg_field(void *base, u32 offset,
+ u32 mask, u32 shift)
+{
+ return (ipa_read_reg(base, offset) & mask) >> shift;
+}
+
+static inline void ipa_write_reg(void *base, u32 offset, u32 val)
+{
+ iowrite32(val, base + offset);
+}
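+
+/*
+ * Usage sketch for the accessors above (the offset/mask/shift values are
+ * made up for illustration):
+ *
+ *	u32 v = ipa_read_reg_field(ipa_ctx->mmio, 0x30, 0xF0, 4);
+ *
+ * is equivalent to (ioread32(ipa_ctx->mmio + 0x30) & 0xF0) >> 4, i.e. it
+ * extracts bits 7:4 of the register at offset 0x30.
+ */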
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+
+ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos);
+int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
+int ipa_query_intf(struct ipa_ioc_query_intf *lookup);
+int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx);
+int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx);
+int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext);
+
+void wwan_cleanup(void);
+
+int teth_bridge_driver_init(void);
+void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+
+int _ipa_init_sram_v2(void);
+int _ipa_init_sram_v2_5(void);
+int _ipa_init_sram_v2_6L(void);
+int _ipa_init_hdr_v2(void);
+int _ipa_init_hdr_v2_5(void);
+int _ipa_init_hdr_v2_6L(void);
+int _ipa_init_rt4_v2(void);
+int _ipa_init_rt6_v2(void);
+int _ipa_init_flt4_v2(void);
+int _ipa_init_flt6_v2(void);
+
+int __ipa_commit_flt_v1_1(enum ipa_ip_type ip);
+int __ipa_commit_flt_v2(enum ipa_ip_type ip);
+int __ipa_commit_rt_v1_1(enum ipa_ip_type ip);
+int __ipa_commit_rt_v2(enum ipa_ip_type ip);
+int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf);
+int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf);
+int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf);
+
+int __ipa_commit_hdr_v1_1(void);
+int __ipa_commit_hdr_v2(void);
+int __ipa_commit_hdr_v2_5(void);
+int __ipa_commit_hdr_v2_6L(void);
+int ipa_generate_flt_eq(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_attrib);
+void ipa_skb_recycle(struct sk_buff *skb);
+void ipa_install_dflt_flt_rules(u32 ipa_ep_idx);
+void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx);
+
+int ipa_enable_data_path(u32 clnt_hdl);
+int ipa_disable_data_path(u32 clnt_hdl);
+int ipa_id_alloc(void *ptr);
+void *ipa_id_find(u32 id);
+void ipa_id_remove(u32 id);
+
+int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+ u32 bandwidth_mbps);
+
+int ipa2_cfg_ep_status(u32 clnt_hdl,
+ const struct ipa_ep_cfg_status *ipa_ep_cfg);
+int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity);
+int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity);
+
+int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name name);
+int ipa2_suspend_resource_sync(enum ipa_rm_resource_name name);
+int ipa2_resume_resource(enum ipa_rm_resource_name name);
+bool ipa_should_pipe_be_suspended(enum ipa_client_type client);
+int ipa_tag_aggr_force_close(int pipe_num);
+
+void ipa_active_clients_lock(void);
+int ipa_active_clients_trylock(unsigned long *flags);
+void ipa_active_clients_unlock(void);
+void ipa_active_clients_trylock_unlock(unsigned long *flags);
+int ipa_wdi_init(void);
+int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
+int ipa_tag_process(struct ipa_desc *desc, int num_descs,
+ unsigned long timeout);
+
+int ipa_q6_pre_shutdown_cleanup(void);
+int ipa_q6_post_shutdown_cleanup(void);
+int ipa_init_q6_smem(void);
+int ipa_q6_monitor_holb_mitigation(bool enable);
+
+int ipa_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+ enum ipa_client_type ipa_client);
+
+int ipa_uc_interface_init(void);
+int ipa_uc_reset_pipe(enum ipa_client_type ipa_client);
+int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable);
+int ipa2_uc_state_check(void);
+int ipa_uc_loaded_check(void);
+int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+ bool polling_mode, unsigned long timeout_jiffies);
+void ipa_register_panic_hdlr(void);
+void ipa_uc_register_handlers(enum ipa_hw_features feature,
+ struct ipa_uc_hdlrs *hdlrs);
+int create_nat_device(void);
+int ipa_uc_notify_clk_state(bool enabled);
+void ipa_dma_async_memcpy_notify_cb(void *priv,
+ enum ipa_dp_evt_type evt, unsigned long data);
+
+int ipa_uc_update_hw_flags(u32 flags);
+
+int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+void ipa2_uc_mhi_cleanup(void);
+int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+ u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+ u32 first_evt_idx);
+int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+ int contexArrayIndex, int channelDirection);
+int ipa2_uc_mhi_reset_channel(int channelHandle);
+int ipa2_uc_mhi_suspend_channel(int channelHandle);
+int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected);
+int ipa2_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa2_uc_mhi_print_stats(char *dbg_buff, int size);
+int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+u32 ipa_get_num_pipes(void);
+u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys);
+struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void);
+struct iommu_domain *ipa_get_uc_smmu_domain(void);
+struct iommu_domain *ipa2_get_wlan_smmu_domain(void);
+int ipa2_ap_suspend(struct device *dev);
+int ipa2_ap_resume(struct device *dev);
+struct iommu_domain *ipa2_get_smmu_domain(void);
+struct device *ipa2_get_dma_dev(void);
+int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+void ipa_suspend_apps_pipes(bool suspend);
+void ipa_update_repl_threshold(enum ipa_client_type ipa_client);
+void ipa_flow_control(enum ipa_client_type ipa_client, bool enable,
+ uint32_t qmap_id);
+int ipa2_restore_suspend_handler(void);
+void ipa_sps_irq_control_all(bool enable);
+void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
+void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
+int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+int ipa2_rx_poll(u32 clnt_hdl, int budget);
+void ipa2_recycle_wan_skb(struct sk_buff *skb);
+int ipa_ntn_init(void);
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats);
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *),
+ void *user_data);
+#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c
new file mode 100644
index 0000000..17f577a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c
@@ -0,0 +1,381 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include "ipa_i.h"
+
+#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq"
+#define IPA_IRQ_NUM_MAX 32
+
+struct ipa_interrupt_info {
+ ipa_irq_handler_t handler;
+ enum ipa_irq_type interrupt;
+ void *private_data;
+ bool deferred_flag;
+};
+
+struct ipa_interrupt_work_wrap {
+ struct work_struct interrupt_work;
+ ipa_irq_handler_t handler;
+ enum ipa_irq_type interrupt;
+ void *private_data;
+ void *interrupt_data;
+};
+
+static struct ipa_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX];
+static struct workqueue_struct *ipa_interrupt_wq;
+static u32 ipa_ee;
+
+static void ipa_interrupt_defer(struct work_struct *work);
+static DECLARE_WORK(ipa_interrupt_defer_work, ipa_interrupt_defer);
+
+static int ipa2_irq_mapping[IPA_IRQ_MAX] = {
+ [IPA_BAD_SNOC_ACCESS_IRQ] = 0,
+ [IPA_EOT_COAL_IRQ] = 1,
+ [IPA_UC_IRQ_0] = 2,
+ [IPA_UC_IRQ_1] = 3,
+ [IPA_UC_IRQ_2] = 4,
+ [IPA_UC_IRQ_3] = 5,
+ [IPA_UC_IN_Q_NOT_EMPTY_IRQ] = 6,
+ [IPA_UC_RX_CMD_Q_NOT_FULL_IRQ] = 7,
+ [IPA_UC_TX_CMD_Q_NOT_FULL_IRQ] = 8,
+ [IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ] = 9,
+ [IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ] = 10,
+ [IPA_RX_ERR_IRQ] = 11,
+ [IPA_DEAGGR_ERR_IRQ] = 12,
+ [IPA_TX_ERR_IRQ] = 13,
+ [IPA_STEP_MODE_IRQ] = 14,
+ [IPA_PROC_ERR_IRQ] = 15,
+ [IPA_TX_SUSPEND_IRQ] = 16,
+ [IPA_TX_HOLB_DROP_IRQ] = 17,
+ [IPA_BAM_IDLE_IRQ] = 18,
+};
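+
+/*
+ * Example of the mapping above: the logical IPA_TX_SUSPEND_IRQ maps to HW
+ * IRQ bit 16, so its mask in the EN/STTS/CLR registers is
+ * (1 << ipa2_irq_mapping[IPA_TX_SUSPEND_IRQ]).
+ */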
+
+static void deferred_interrupt_work(struct work_struct *work)
+{
+ struct ipa_interrupt_work_wrap *work_data =
+ container_of(work,
+ struct ipa_interrupt_work_wrap,
+ interrupt_work);
+ IPADBG("call handler from workq...\n");
+ work_data->handler(work_data->interrupt, work_data->private_data,
+ work_data->interrupt_data);
+ kfree(work_data->interrupt_data);
+ kfree(work_data);
+}
+
+static bool is_valid_ep(u32 ep_suspend_data)
+{
+ u32 bmsk = 1;
+ u32 i = 0;
+
+ for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+ if ((ep_suspend_data & bmsk) && (ipa_ctx->ep[i].valid))
+ return true;
+ bmsk = bmsk << 1;
+ }
+ return false;
+}
+
+static int handle_interrupt(int irq_num, bool isr_context)
+{
+ struct ipa_interrupt_info interrupt_info;
+ struct ipa_interrupt_work_wrap *work_data;
+ u32 suspend_data;
+ void *interrupt_data = NULL;
+ struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL;
+ int res;
+
+ interrupt_info = ipa_interrupt_to_cb[irq_num];
+ if (interrupt_info.handler == NULL) {
+ IPAERR("A callback function wasn't set for interrupt num %d\n",
+ irq_num);
+ return -EINVAL;
+ }
+
+ switch (interrupt_info.interrupt) {
+ case IPA_TX_SUSPEND_IRQ:
+ suspend_data = ipa_read_reg(ipa_ctx->mmio,
+ IPA_IRQ_SUSPEND_INFO_EE_n_ADDR(ipa_ee));
+ if (!is_valid_ep(suspend_data))
+ return 0;
+
+ suspend_interrupt_data =
+ kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC);
+ if (!suspend_interrupt_data) {
+ IPAERR("failed allocating suspend_interrupt_data\n");
+ return -ENOMEM;
+ }
+ suspend_interrupt_data->endpoints = suspend_data;
+ interrupt_data = suspend_interrupt_data;
+ break;
+ default:
+ break;
+ }
+
+ /* Force defer processing if in ISR context. */
+ if (interrupt_info.deferred_flag || isr_context) {
+ work_data = kzalloc(sizeof(struct ipa_interrupt_work_wrap),
+ GFP_ATOMIC);
+ if (!work_data) {
+ IPAERR("failed allocating ipa_interrupt_work_wrap\n");
+ res = -ENOMEM;
+ goto fail_alloc_work;
+ }
+ INIT_WORK(&work_data->interrupt_work, deferred_interrupt_work);
+ work_data->handler = interrupt_info.handler;
+ work_data->interrupt = interrupt_info.interrupt;
+ work_data->private_data = interrupt_info.private_data;
+ work_data->interrupt_data = interrupt_data;
+ queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+
+ } else {
+ interrupt_info.handler(interrupt_info.interrupt,
+ interrupt_info.private_data,
+ interrupt_data);
+ kfree(interrupt_data);
+ }
+
+ return 0;
+
+fail_alloc_work:
+ kfree(interrupt_data);
+ return res;
+}
+
+static inline bool is_uc_irq(int irq_num)
+{
+ if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 &&
+ ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3)
+ return true;
+ else
+ return false;
+}
+
+static void ipa_process_interrupts(bool isr_context)
+{
+ u32 reg;
+ u32 bmsk;
+ u32 i = 0;
+ u32 en;
+ bool uc_irq;
+
+ en = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+ reg = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_STTS_EE_n_ADDR(ipa_ee));
+ while (en & reg) {
+ bmsk = 1;
+ for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
+ if (!(en & reg & bmsk)) {
+ bmsk = bmsk << 1;
+ continue;
+ }
+ uc_irq = is_uc_irq(i);
+ /*
+ * Clear uC interrupt before processing to avoid
+ * clearing unhandled interrupts
+ */
+ if (uc_irq)
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), bmsk);
+
+ /* Process the interrupts */
+ handle_interrupt(i, isr_context);
+
+ /*
+ * Clear non uC interrupt after processing
+ * to avoid clearing interrupt data
+ */
+ if (!uc_irq)
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), bmsk);
+
+ bmsk = bmsk << 1;
+ }
+ /*
+ * Check pending interrupts that may have
+ * been raised since last read
+ */
+ reg = ipa_read_reg(ipa_ctx->mmio,
+ IPA_IRQ_STTS_EE_n_ADDR(ipa_ee));
+ }
+}
+
+static void ipa_interrupt_defer(struct work_struct *work)
+{
+ IPADBG("processing interrupts in wq\n");
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipa_process_interrupts(false);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPADBG("Done\n");
+}
+
+static irqreturn_t ipa_isr(int irq, void *ctxt)
+{
+ unsigned long flags;
+
+ /* defer interrupt handling in case IPA is not clocked on */
+ if (ipa_active_clients_trylock(&flags) == 0) {
+ IPADBG("defer interrupt processing\n");
+ queue_work(ipa_ctx->power_mgmt_wq, &ipa_interrupt_defer_work);
+ return IRQ_HANDLED;
+ }
+
+ if (ipa_ctx->ipa_active_clients.cnt == 0) {
+ IPADBG("defer interrupt processing\n");
+ queue_work(ipa_ctx->power_mgmt_wq, &ipa_interrupt_defer_work);
+ goto bail;
+ }
+
+ ipa_process_interrupts(true);
+
+bail:
+ ipa_active_clients_trylock_unlock(&flags);
+ return IRQ_HANDLED;
+}
+/**
+* ipa2_add_interrupt_handler() - Adds a handler for an interrupt type
+* @interrupt: Interrupt type
+* @handler: The handler to be added
+* @deferred_flag: whether the handler processing should be deferred in
+* a workqueue
+* @private_data: the client's private data
+*
+* Adds a handler for an interrupt type and enables the specific bit
+* in the IRQ_EN register; the associated interrupt in the IRQ_STTS register
+* will be enabled
+*/
+int ipa2_add_interrupt_handler(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data)
+{
+ u32 val;
+ u32 bmsk;
+ int irq_num;
+
+ IPADBG("in ipa2_add_interrupt_handler\n");
+ if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+ interrupt >= IPA_IRQ_MAX) {
+ IPAERR("invalid interrupt number %d\n", interrupt);
+ return -EINVAL;
+ }
+
+ irq_num = ipa2_irq_mapping[interrupt];
+ if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+ IPAERR("interrupt %d not supported\n", interrupt);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
+ ipa_interrupt_to_cb[irq_num].handler = handler;
+ ipa_interrupt_to_cb[irq_num].private_data = private_data;
+ ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
+
+ val = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+ IPADBG("read IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val);
+ bmsk = 1 << irq_num;
+ val |= bmsk;
+ ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
+ IPADBG("wrote IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val);
+ return 0;
+}
+
+/**
+* ipa2_remove_interrupt_handler() - Removes the handler for an interrupt type
+* @interrupt: Interrupt type
+*
+* Removes the handler and disables the specific bit in the IRQ_EN register
+*/
+int ipa2_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+ u32 val;
+ u32 bmsk;
+ int irq_num;
+
+ if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+ interrupt >= IPA_IRQ_MAX) {
+ IPAERR("invalid interrupt number %d\n", interrupt);
+ return -EINVAL;
+ }
+
+ irq_num = ipa2_irq_mapping[interrupt];
+ if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+ IPAERR("interrupt %d not supported\n", interrupt);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ kfree(ipa_interrupt_to_cb[irq_num].private_data);
+ ipa_interrupt_to_cb[irq_num].deferred_flag = false;
+ ipa_interrupt_to_cb[irq_num].handler = NULL;
+ ipa_interrupt_to_cb[irq_num].private_data = NULL;
+ ipa_interrupt_to_cb[irq_num].interrupt = -1;
+
+ val = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+ bmsk = 1 << irq_num;
+ val &= ~bmsk;
+ ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
+
+ return 0;
+}
+
+/**
+* ipa_interrupts_init() - Initialize the IPA interrupts framework
+* @ipa_irq: The interrupt number to allocate
+* @ee: Execution environment
+* @ipa_dev: The basic device structure representing the IPA driver
+*
+* - Initialize the ipa_interrupt_to_cb array
+* - Clear interrupts status
+* - Register the ipa interrupt handler - ipa_isr
+* - Enable apps processor wakeup by IPA interrupts
+*/
+int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
+{
+ int idx;
+ u32 reg = 0xFFFFFFFF;
+ int res = 0;
+
+ ipa_ee = ee;
+ for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) {
+ ipa_interrupt_to_cb[idx].deferred_flag = false;
+ ipa_interrupt_to_cb[idx].handler = NULL;
+ ipa_interrupt_to_cb[idx].private_data = NULL;
+ ipa_interrupt_to_cb[idx].interrupt = -1;
+ }
+
+ ipa_interrupt_wq = create_singlethread_workqueue(
+ INTERRUPT_WORKQUEUE_NAME);
+ if (!ipa_interrupt_wq) {
+ IPAERR("workqueue creation failed\n");
+ return -ENOMEM;
+ }
+
+	/* Clear interrupts status */
+ ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), reg);
+
+ res = request_irq(ipa_irq, (irq_handler_t) ipa_isr,
+ IRQF_TRIGGER_RISING, "ipa", ipa_dev);
+ if (res) {
+ IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq);
+ return -ENODEV;
+ }
+ IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
+
+ res = enable_irq_wake(ipa_irq);
+ if (res)
+ IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
+ ipa_irq, res);
+ else
+ IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
+
+ return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
new file mode 100644
index 0000000..8ec83eb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -0,0 +1,607 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include "ipa_i.h"
+
+struct ipa_intf {
+ char name[IPA_RESOURCE_NAME_MAX];
+ struct list_head link;
+ u32 num_tx_props;
+ u32 num_rx_props;
+ u32 num_ext_props;
+ struct ipa_ioc_tx_intf_prop *tx;
+ struct ipa_ioc_rx_intf_prop *rx;
+ struct ipa_ioc_ext_intf_prop *ext;
+ enum ipa_client_type excp_pipe;
+};
+
+struct ipa_push_msg {
+ struct ipa_msg_meta meta;
+ ipa_msg_free_fn callback;
+ void *buff;
+ struct list_head link;
+};
+
+struct ipa_pull_msg {
+ struct ipa_msg_meta meta;
+ ipa_msg_pull_fn callback;
+ struct list_head link;
+};
+
+/**
+ * ipa2_register_intf() - register "logical" interface
+ * @name: [in] interface name
+ * @tx: [in] TX properties of the interface
+ * @rx: [in] RX properties of the interface
+ *
+ * Register an interface and its tx and rx properties; this allows
+ * configuration of rules from user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_register_intf(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx)
+{
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ return ipa2_register_intf_ext(name, tx, rx, NULL);
+}
+
+/**
+ * ipa2_register_intf_ext() - register "logical" interface which also has
+ * extended properties
+ * @name: [in] interface name
+ * @tx: [in] TX properties of the interface
+ * @rx: [in] RX properties of the interface
+ * @ext: [in] EXT properties of the interface
+ *
+ * Register an interface and its tx, rx and ext properties; this allows
+ * configuration of rules from user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext)
+{
+ struct ipa_intf *intf;
+ u32 len;
+
+ if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) {
+ IPAERR("invalid params name=%p tx=%p rx=%p ext=%p\n", name,
+ tx, rx, ext);
+ return -EINVAL;
+ }
+
+ if (tx && tx->num_props > IPA_NUM_PROPS_MAX) {
+ IPAERR("invalid tx num_props=%d max=%d\n", tx->num_props,
+ IPA_NUM_PROPS_MAX);
+ return -EINVAL;
+ }
+
+ if (rx && rx->num_props > IPA_NUM_PROPS_MAX) {
+ IPAERR("invalid rx num_props=%d max=%d\n", rx->num_props,
+ IPA_NUM_PROPS_MAX);
+ return -EINVAL;
+ }
+
+ if (ext && ext->num_props > IPA_NUM_PROPS_MAX) {
+ IPAERR("invalid ext num_props=%d max=%d\n", ext->num_props,
+ IPA_NUM_PROPS_MAX);
+ return -EINVAL;
+ }
+
+ len = sizeof(struct ipa_intf);
+ intf = kzalloc(len, GFP_KERNEL);
+ if (intf == NULL) {
+ IPAERR("fail to alloc 0x%x bytes\n", len);
+ return -ENOMEM;
+ }
+
+ strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX);
+
+ if (tx) {
+ intf->num_tx_props = tx->num_props;
+ len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop);
+ intf->tx = kzalloc(len, GFP_KERNEL);
+ if (intf->tx == NULL) {
+ IPAERR("fail to alloc 0x%x bytes\n", len);
+ kfree(intf);
+ return -ENOMEM;
+ }
+ memcpy(intf->tx, tx->prop, len);
+ }
+
+ if (rx) {
+ intf->num_rx_props = rx->num_props;
+ len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop);
+ intf->rx = kzalloc(len, GFP_KERNEL);
+ if (intf->rx == NULL) {
+ IPAERR("fail to alloc 0x%x bytes\n", len);
+ kfree(intf->tx);
+ kfree(intf);
+ return -ENOMEM;
+ }
+ memcpy(intf->rx, rx->prop, len);
+ }
+
+ if (ext) {
+ intf->num_ext_props = ext->num_props;
+ len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop);
+ intf->ext = kzalloc(len, GFP_KERNEL);
+ if (intf->ext == NULL) {
+ IPAERR("fail to alloc 0x%x bytes\n", len);
+ kfree(intf->rx);
+ kfree(intf->tx);
+ kfree(intf);
+ return -ENOMEM;
+ }
+ memcpy(intf->ext, ext->prop, len);
+ }
+
+ if (ext && ext->excp_pipe_valid)
+ intf->excp_pipe = ext->excp_pipe;
+ else
+ intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS;
+
+ mutex_lock(&ipa_ctx->lock);
+ list_add_tail(&intf->link, &ipa_ctx->intf_list);
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
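+
+/*
+ * Usage sketch (illustrative only, compiled out): a client registers one
+ * IPv4 tx and one IPv4 rx property for its logical interface and
+ * deregisters it on removal. The pipe selections are placeholders.
+ */
+#if 0
+static int client_intf_setup(const char *name)
+{
+	struct ipa_ioc_tx_intf_prop tx_prop = { .ip = IPA_IP_v4,
+		.dst_pipe = IPA_CLIENT_APPS_WAN_CONS };
+	struct ipa_ioc_rx_intf_prop rx_prop = { .ip = IPA_IP_v4,
+		.src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD };
+	struct ipa_tx_intf tx = { .num_props = 1, .prop = &tx_prop };
+	struct ipa_rx_intf rx = { .num_props = 1, .prop = &rx_prop };
+
+	return ipa2_register_intf(name, &tx, &rx);
+}
+
+static void client_intf_teardown(const char *name)
+{
+	ipa2_deregister_intf(name);
+}
+#endif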
+
+/**
+ * ipa2_deregister_intf() - de-register previously registered logical interface
+ * @name: [in] interface name
+ *
+ * De-register a previously registered interface
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_deregister_intf(const char *name)
+{
+ struct ipa_intf *entry;
+ struct ipa_intf *next;
+ int result = -EINVAL;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (name == NULL) {
+ IPAERR("invalid param name=%p\n", name);
+ return result;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry_safe(entry, next, &ipa_ctx->intf_list, link) {
+ if (!strcmp(entry->name, name)) {
+ list_del(&entry->link);
+ kfree(entry->ext);
+ kfree(entry->rx);
+ kfree(entry->tx);
+ kfree(entry);
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa_query_intf() - query logical interface properties
+ * @lookup: [inout] interface name and number of properties
+ *
+ * Obtain the number of tx, rx and ext properties and the exception pipe
+ * of the named interface, used as the first step of querying those
+ * properties for configuration of various rules from user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_query_intf(struct ipa_ioc_query_intf *lookup)
+{
+ struct ipa_intf *entry;
+ int result = -EINVAL;
+
+ if (lookup == NULL) {
+ IPAERR("invalid param lookup=%p\n", lookup);
+ return result;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
+ if (!strcmp(entry->name, lookup->name)) {
+ lookup->num_tx_props = entry->num_tx_props;
+ lookup->num_rx_props = entry->num_rx_props;
+ lookup->num_ext_props = entry->num_ext_props;
+ lookup->excp_pipe = entry->excp_pipe;
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa_query_intf_tx_props() - query TX props of an interface
+ * @tx: [inout] interface tx attributes
+ *
+ * Obtain the tx properties for the specified interface
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
+{
+ struct ipa_intf *entry;
+ int result = -EINVAL;
+
+ if (tx == NULL) {
+ IPAERR("invalid param tx=%p\n", tx);
+ return result;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
+ if (!strcmp(entry->name, tx->name)) {
+ memcpy(tx->tx, entry->tx, entry->num_tx_props *
+ sizeof(struct ipa_ioc_tx_intf_prop));
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa_query_intf_rx_props() - query RX props of an interface
+ * @rx: [inout] interface rx attributes
+ *
+ * Obtain the rx properties for the specified interface
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
+{
+ struct ipa_intf *entry;
+ int result = -EINVAL;
+
+ if (rx == NULL) {
+ IPAERR("invalid param rx=%p\n", rx);
+ return result;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
+ if (!strcmp(entry->name, rx->name)) {
+ memcpy(rx->rx, entry->rx, entry->num_rx_props *
+ sizeof(struct ipa_ioc_rx_intf_prop));
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa_query_intf_ext_props() - query EXT props of an interface
+ * @ext: [inout] interface ext attributes
+ *
+ * Obtain the ext properties for the specified interface
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
+{
+ struct ipa_intf *entry;
+ int result = -EINVAL;
+
+ if (ext == NULL) {
+ IPAERR("invalid param ext=%p\n", ext);
+ return result;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
+ if (!strcmp(entry->name, ext->name)) {
+ memcpy(ext->ext, entry->ext, entry->num_ext_props *
+ sizeof(struct ipa_ioc_ext_intf_prop));
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
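+
+/*
+ * Usage sketch (illustrative only, compiled out): the two-step query flow.
+ * The caller first obtains the property counts via ipa_query_intf(), then
+ * sizes the buffer from those counts before fetching the tx properties;
+ * the memcpy in ipa_query_intf_tx_props() trusts the buffer to be large
+ * enough. The flexible tx[] layout is assumed from the msm_ipa UAPI header.
+ */
+#if 0
+static int client_query_tx_props(const char *name)
+{
+	struct ipa_ioc_query_intf lookup = { { 0 } };
+	struct ipa_ioc_query_intf_tx_props *tx_query;
+	int ret;
+
+	strlcpy(lookup.name, name, IPA_RESOURCE_NAME_MAX);
+	ret = ipa_query_intf(&lookup);
+	if (ret)
+		return ret;
+
+	tx_query = kzalloc(sizeof(*tx_query) + lookup.num_tx_props *
+		sizeof(struct ipa_ioc_tx_intf_prop), GFP_KERNEL);
+	if (!tx_query)
+		return -ENOMEM;
+	strlcpy(tx_query->name, name, IPA_RESOURCE_NAME_MAX);
+	ret = ipa_query_intf_tx_props(tx_query);
+	/* on success, use tx_query->tx[0 .. num_tx_props - 1] here */
+	kfree(tx_query);
+	return ret;
+}
+#endif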
+
+/**
+ * ipa2_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * Client supplies the message meta-data and payload which the IPA driver
+ * buffers until it is read by user-space. After the read, the IPA driver
+ * invokes the supplied callback to free the message payload. The client
+ * must not touch or free the message payload after calling this API.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback)
+{
+ struct ipa_push_msg *msg;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (meta == NULL || (buff == NULL && callback != NULL) ||
+ (buff != NULL && callback == NULL)) {
+ IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ meta, buff, callback);
+ return -EINVAL;
+ }
+
+ if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
+ IPAERR("unsupported message type %d\n", meta->msg_type);
+ return -EINVAL;
+ }
+
+ msg = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL);
+ if (msg == NULL) {
+ IPAERR("fail to alloc ipa_msg container\n");
+ return -ENOMEM;
+ }
+
+ msg->meta = *meta;
+ msg->buff = buff;
+ msg->callback = callback;
+
+ mutex_lock(&ipa_ctx->msg_lock);
+ list_add_tail(&msg->link, &ipa_ctx->msg_list);
+ mutex_unlock(&ipa_ctx->msg_lock);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.msg_w[meta->msg_type]);
+
+ wake_up(&ipa_ctx->msg_waitq);
+
+ return 0;
+}
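+
+/*
+ * Usage sketch (illustrative only, compiled out): a client hands a
+ * kzalloc'ed payload to the driver together with a free callback; the
+ * payload is freed only after user-space has read the message. The
+ * WLAN event type and payload struct are placeholders.
+ */
+#if 0
+static void client_msg_free(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
+static int client_notify_connect(void)
+{
+	struct ipa_wlan_msg *msg;
+	struct ipa_msg_meta meta = { 0 };
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+	meta.msg_type = WLAN_CLIENT_CONNECT;
+	meta.msg_len = sizeof(*msg);
+	return ipa2_send_msg(&meta, msg, client_msg_free);
+}
+#endif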
+
+/**
+ * ipa2_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register message callback by kernel client with IPA driver for IPA driver to
+ * pull message on-demand.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
+{
+ struct ipa_pull_msg *msg;
+
+ if (meta == NULL || callback == NULL) {
+ IPAERR("invalid param meta=%p callback=%p\n", meta, callback);
+ return -EINVAL;
+ }
+
+ msg = kzalloc(sizeof(struct ipa_pull_msg), GFP_KERNEL);
+ if (msg == NULL) {
+ IPAERR("fail to alloc ipa_msg container\n");
+ return -ENOMEM;
+ }
+
+ msg->meta = *meta;
+ msg->callback = callback;
+
+ mutex_lock(&ipa_ctx->msg_lock);
+ list_add_tail(&msg->link, &ipa_ctx->pull_msg_list);
+ mutex_unlock(&ipa_ctx->msg_lock);
+
+ return 0;
+}
+
+/**
+ * ipa2_deregister_pull_msg() - De-register pull message type
+ * @meta: [in] message meta-data
+ *
+ * De-register "message" by kernel client from IPA driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+ struct ipa_pull_msg *entry;
+ struct ipa_pull_msg *next;
+ int result = -EINVAL;
+
+ if (meta == NULL) {
+		IPAERR("invalid param meta=%p\n", meta);
+ return result;
+ }
+
+ mutex_lock(&ipa_ctx->msg_lock);
+ list_for_each_entry_safe(entry, next, &ipa_ctx->pull_msg_list, link) {
+ if (entry->meta.msg_len == meta->msg_len &&
+ entry->meta.msg_type == meta->msg_type) {
+ list_del(&entry->link);
+ kfree(entry);
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa_ctx->msg_lock);
+ return result;
+}
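+
+/*
+ * Usage sketch (illustrative only, compiled out): registering a pull
+ * callback. ipa_pull_msg() matches on both msg_type and msg_len before
+ * invoking it, so the same meta must be used on deregistration.
+ */
+#if 0
+static int client_pull_cb(void *buff, u32 len, u32 type)
+{
+	/* fill buff with at most len bytes, return the number written */
+	return 0;
+}
+
+static int client_pull_setup(struct ipa_msg_meta *meta)
+{
+	return ipa2_register_pull_msg(meta, client_pull_cb);
+}
+#endif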
+
+/**
+ * ipa_read() - read message from IPA device
+ * @filp: [in] file pointer
+ * @buf: [out] buffer to read into
+ * @count: [in] size of above buffer
+ * @f_pos: [inout] file position
+ *
+ * User-space should continually read from /dev/ipa; the read will block when
+ * there are no messages to read. Upon return, user-space should read the
+ * ipa_msg_meta from the start of the buffer to know what type of message was
+ * read and its length in the remainder of the buffer. The buffer supplied
+ * must be big enough to hold the message meta-data and the largest defined
+ * message type
+ *
+ * Returns: the number of bytes copied to the buffer, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ char __user *start;
+ struct ipa_push_msg *msg = NULL;
+ int ret;
+ DEFINE_WAIT(wait);
+ int locked;
+
+ start = buf;
+
+ while (1) {
+ prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&ipa_ctx->msg_lock);
+ locked = 1;
+ if (!list_empty(&ipa_ctx->msg_list)) {
+ msg = list_first_entry(&ipa_ctx->msg_list,
+ struct ipa_push_msg, link);
+ list_del(&msg->link);
+ }
+
+ IPADBG("msg=%p\n", msg);
+
+ if (msg) {
+ locked = 0;
+ mutex_unlock(&ipa_ctx->msg_lock);
+ if (copy_to_user(buf, &msg->meta,
+ sizeof(struct ipa_msg_meta))) {
+ ret = -EFAULT;
+ break;
+ }
+ buf += sizeof(struct ipa_msg_meta);
+ count -= sizeof(struct ipa_msg_meta);
+ if (msg->buff) {
+ if (copy_to_user(buf, msg->buff,
+ msg->meta.msg_len)) {
+ ret = -EFAULT;
+ break;
+ }
+ buf += msg->meta.msg_len;
+ count -= msg->meta.msg_len;
+ msg->callback(msg->buff, msg->meta.msg_len,
+ msg->meta.msg_type);
+ }
+ IPA_STATS_INC_CNT(
+ ipa_ctx->stats.msg_r[msg->meta.msg_type]);
+ kfree(msg);
+ }
+
+ ret = -EAGAIN;
+ if (filp->f_flags & O_NONBLOCK)
+ break;
+
+ ret = -EINTR;
+ if (signal_pending(current))
+ break;
+
+ if (start != buf)
+ break;
+
+ locked = 0;
+ mutex_unlock(&ipa_ctx->msg_lock);
+ schedule();
+ }
+
+ finish_wait(&ipa_ctx->msg_waitq, &wait);
+ if (start != buf && ret != -EFAULT)
+ ret = buf - start;
+
+ if (locked)
+ mutex_unlock(&ipa_ctx->msg_lock);
+
+ return ret;
+}
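+
+/*
+ * User-space counterpart sketch (illustrative, not part of this driver):
+ *
+ *	int fd = open("/dev/ipa", O_RDONLY);
+ *	char buf[512];	// placeholder; must cover meta + largest message
+ *	ssize_t n;
+ *
+ *	while ((n = read(fd, buf, sizeof(buf))) >=
+ *			(ssize_t)sizeof(struct ipa_msg_meta)) {
+ *		struct ipa_msg_meta *meta = (struct ipa_msg_meta *)buf;
+ *		// payload, if any, follows at buf + sizeof(*meta) and is
+ *		// meta->msg_len bytes long
+ *	}
+ */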
+
+/**
+ * ipa_pull_msg() - pull the specified message from client
+ * @meta: [in] message meta-data
+ * @buf: [out] buffer to read into
+ * @count: [in] size of above buffer
+ *
+ * Populate the supplied buffer with the pull message which is fetched
+ * from client, the message must have previously been registered with
+ * the IPA driver
+ *
+ * Returns: the number of bytes copied to the buffer, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
+{
+ struct ipa_pull_msg *entry;
+ int result = -EINVAL;
+
+ if (meta == NULL || buff == NULL || !count) {
+		IPAERR("invalid param meta=%p buff=%p count=%zu\n",
+ meta, buff, count);
+ return result;
+ }
+
+ mutex_lock(&ipa_ctx->msg_lock);
+ list_for_each_entry(entry, &ipa_ctx->pull_msg_list, link) {
+ if (entry->meta.msg_len == meta->msg_len &&
+ entry->meta.msg_type == meta->msg_type) {
+ result = entry->callback(buff, count, meta->msg_type);
+ break;
+ }
+ }
+ mutex_unlock(&ipa_ctx->msg_lock);
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
new file mode 100644
index 0000000..e8f25c9
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
@@ -0,0 +1,319 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/ipa_mhi.h>
+#include "ipa_i.h"
+#include "ipa_qmi_service.h"
+
+#define IPA_MHI_DRV_NAME "ipa_mhi"
+#define IPA_MHI_DBG(fmt, args...) \
+ pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args)
+#define IPA_MHI_ERR(fmt, args...) \
+ pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPA_MHI_FUNC_ENTRY() \
+ IPA_MHI_DBG("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+ IPA_MHI_DBG("EXIT\n")
+
+bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client)
+{
+ u32 pipe_idx;
+ bool pending;
+
+ pipe_idx = ipa2_get_ep_mapping(client);
+ if (sps_pipe_pending_desc(ipa_ctx->bam_handle,
+ pipe_idx, &pending)) {
+ IPA_MHI_ERR("sps_pipe_pending_desc failed\n");
+ WARN_ON(1);
+ return false;
+ }
+
+ return !pending;
+}
+
+int ipa2_disable_sps_pipe(enum ipa_client_type client)
+{
+ int ipa_ep_index;
+ int res;
+
+ ipa_ep_index = ipa2_get_ep_mapping(client);
+
+ res = sps_pipe_disable(ipa_ctx->bam_handle, ipa_ep_index);
+ if (res) {
+ IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
+ return res;
+ }
+
+ return 0;
+}
+
+int ipa2_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ res = ipa_disable_data_path(ipa2_get_ep_mapping(client));
+ if (res) {
+ IPA_MHI_ERR("ipa_disable_data_path failed %d\n", res);
+ return res;
+ }
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+}
+
+int ipa2_mhi_start_channel_internal(enum ipa_client_type client)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ res = ipa_enable_data_path(ipa2_get_ep_mapping(client));
+ if (res) {
+ IPA_MHI_ERR("ipa_enable_data_path failed %d\n", res);
+ return res;
+ }
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+}
+
+int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!params) {
+ IPA_MHI_ERR("null args\n");
+ return -EINVAL;
+ }
+
+ if (ipa2_uc_state_check()) {
+ IPA_MHI_ERR("IPA uc is not loaded\n");
+ return -EAGAIN;
+ }
+
+ /* Initialize IPA MHI engine */
+ res = ipa_uc_mhi_init_engine(params->uC.msi, params->uC.mmio_addr,
+ params->uC.host_ctrl_addr, params->uC.host_data_addr,
+ params->uC.first_ch_idx, params->uC.first_er_idx);
+ if (res) {
+ IPA_MHI_ERR("failed to start MHI engine %d\n", res);
+ goto fail_init_engine;
+ }
+
+ /* Update UL/DL sync if valid */
+ res = ipa2_uc_mhi_send_dl_ul_sync_info(
+ params->uC.ipa_cached_dl_ul_sync_info);
+ if (res) {
+ IPA_MHI_ERR("failed to update ul/dl sync %d\n", res);
+ goto fail_init_engine;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_init_engine:
+ return res;
+}
+
+/**
+ * ipa2_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ * This function is doing the following:
+ * - Send command to uC to start corresponding MHI channel
+ * - Configure IPA EP control
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl)
+{
+ struct ipa_ep_context *ep;
+ int ipa_ep_idx;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!in || !clnt_hdl) {
+ IPA_MHI_ERR("NULL args\n");
+ return -EINVAL;
+ }
+
+ if (in->sys->client >= IPA_CLIENT_MAX) {
+		IPA_MHI_ERR("bad param client:%d\n", in->sys->client);
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa2_get_ep_mapping(in->sys->client);
+ if (ipa_ep_idx == -1) {
+ IPA_MHI_ERR("Invalid client.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n",
+ in->sys->client, in->start.uC.index, in->start.uC.id);
+
+ if (ep->valid == 1) {
+ IPA_MHI_ERR("EP already allocated.\n");
+ goto fail_ep_exists;
+ }
+
+ memset(ep, 0, offsetof(struct ipa_ep_context, sys));
+ ep->valid = 1;
+ ep->skip_ep_cfg = in->sys->skip_ep_cfg;
+ ep->client = in->sys->client;
+ ep->client_notify = in->sys->notify;
+ ep->priv = in->sys->priv;
+ ep->keep_ipa_awake = in->sys->keep_ipa_awake;
+
+ /* start channel in uC */
+ if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
+ IPA_MHI_DBG("Initializing channel\n");
+ res = ipa_uc_mhi_init_channel(ipa_ep_idx, in->start.uC.index,
+ in->start.uC.id,
+ (IPA_CLIENT_IS_PROD(ep->client) ? 1 : 2));
+ if (res) {
+ IPA_MHI_ERR("init_channel failed %d\n", res);
+ goto fail_init_channel;
+ }
+ } else if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+ IPA_MHI_DBG("Starting channel\n");
+ res = ipa_uc_mhi_resume_channel(in->start.uC.index, false);
+ if (res) {
+			IPA_MHI_ERR("resume_channel failed %d\n", res);
+ goto fail_init_channel;
+ }
+ } else {
+ IPA_MHI_ERR("Invalid channel state %d\n", in->start.uC.state);
+ goto fail_init_channel;
+ }
+
+ res = ipa_enable_data_path(ipa_ep_idx);
+ if (res) {
+ IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
+ ipa_ep_idx);
+ goto fail_enable_dp;
+ }
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa2_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto fail_ep_cfg;
+ }
+ if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+ IPAERR("fail to configure status of EP.\n");
+ goto fail_ep_cfg;
+ }
+ IPA_MHI_DBG("ep configuration successful\n");
+ } else {
+ IPA_MHI_DBG("skipping ep configuration\n");
+ }
+
+ *clnt_hdl = ipa_ep_idx;
+
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys->client))
+ ipa_install_dflt_flt_rules(ipa_ep_idx);
+
+ ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+ IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys->client,
+ ipa_ep_idx);
+
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+
+fail_ep_cfg:
+ ipa_disable_data_path(ipa_ep_idx);
+fail_enable_dp:
+ ipa_uc_mhi_reset_channel(in->start.uC.index);
+fail_init_channel:
+ memset(ep, 0, offsetof(struct ipa_ep_context, sys));
+fail_ep_exists:
+ return -EPERM;
+}
+
+/**
+ * ipa2_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset
+ * corresponding MHI channel
+ * @clnt_hdl: [in] client handle of the pipe to disconnect
+ *
+ * This function is called by the IPA MHI client driver on MHI channel reset.
+ * This function is called after the MHI channel was started.
+ * This function does the following:
+ *	- Mark the IPA EP as invalid
+ *	- Delete the pipe's default filter rules
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa2_disconnect_mhi_pipe(u32 clnt_hdl)
+{
+ IPA_MHI_FUNC_ENTRY();
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes) {
+ IPAERR("invalid handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ if (ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("pipe was not connected %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ ipa_ctx->ep[clnt_hdl].valid = 0;
+
+ ipa_delete_dflt_flt_rules(clnt_hdl);
+
+ IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+int ipa2_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+ res = ipa_uc_mhi_resume_channel(index, LPTransitionRejected);
+ if (res) {
+		IPA_MHI_ERR("failed to resume channel %u error %d\n",
+ index, res);
+ return res;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI driver");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
new file mode 100644
index 0000000..9b97f57
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -0,0 +1,769 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET 0
+#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_SYSTEM_MEMORY 0
+#define IPA_NAT_SHARED_MEMORY 1
+#define IPA_NAT_TEMP_MEM_SIZE 128
+
+static int ipa_nat_vma_fault_remap(
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ IPADBG("\n");
+ vmf->page = NULL;
+
+ return VM_FAULT_SIGBUS;
+}
+
+/* VMA related file operations functions */
+static struct vm_operations_struct ipa_nat_remap_vm_ops = {
+ .fault = ipa_nat_vma_fault_remap,
+};
+
+static int ipa_nat_open(struct inode *inode, struct file *filp)
+{
+ struct ipa_nat_mem *nat_ctx;
+
+ IPADBG("\n");
+ nat_ctx = container_of(inode->i_cdev, struct ipa_nat_mem, cdev);
+ filp->private_data = nat_ctx;
+ IPADBG("return\n");
+
+ return 0;
+}
+
+static int ipa_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct ipa_nat_mem *nat_ctx = (struct ipa_nat_mem *)filp->private_data;
+ unsigned long phys_addr;
+ int result;
+
+ mutex_lock(&nat_ctx->lock);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("Mapping system memory\n");
+ if (nat_ctx->is_mapped) {
+ IPAERR("mapping already exists, only 1 supported\n");
+ result = -EINVAL;
+ goto bail;
+ }
+ IPADBG("map sz=0x%zx\n", nat_ctx->size);
+ result =
+ dma_mmap_coherent(
+ ipa_ctx->pdev, vma,
+ nat_ctx->vaddr, nat_ctx->dma_handle,
+ nat_ctx->size);
+
+ if (result) {
+ IPAERR("unable to map memory. Err:%d\n", result);
+ goto bail;
+ }
+ ipa_ctx->nat_mem.nat_base_address = nat_ctx->vaddr;
+ } else {
+ IPADBG("Mapping shared(local) memory\n");
+ IPADBG("map sz=0x%lx\n", vsize);
+
+ if ((IPA_NAT_PHYS_MEM_SIZE == 0) ||
+ (vsize > IPA_NAT_PHYS_MEM_SIZE)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ phys_addr = ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_DIRECT_ACCESS_N_OFST(IPA_NAT_PHYS_MEM_OFFSET);
+
+ if (remap_pfn_range(
+ vma, vma->vm_start,
+ phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+ IPAERR("remap failed\n");
+ result = -EAGAIN;
+ goto bail;
+ }
+ ipa_ctx->nat_mem.nat_base_address = (void *)vma->vm_start;
+ }
+ nat_ctx->is_mapped = true;
+ vma->vm_ops = &ipa_nat_remap_vm_ops;
+ IPADBG("return\n");
+ result = 0;
+bail:
+ mutex_unlock(&nat_ctx->lock);
+ return result;
+}
+
+static const struct file_operations ipa_nat_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa_nat_open,
+ .mmap = ipa_nat_mmap
+};
+
+/**
+ * allocate_temp_nat_memory() - Allocates temporary NAT memory
+ *
+ * The temporary memory is used by the NAT table delete command
+ */
+void allocate_temp_nat_memory(void)
+{
+ struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+
+ nat_ctx->tmp_vaddr =
+ dma_alloc_coherent(ipa_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE,
+ &nat_ctx->tmp_dma_handle, gfp_flags);
+
+ if (nat_ctx->tmp_vaddr == NULL) {
+ IPAERR("Temp Memory alloc failed\n");
+ nat_ctx->is_tmp_mem = false;
+ return;
+ }
+
+ nat_ctx->is_tmp_mem = true;
+ IPADBG("IPA NAT allocated temp memory successfully\n");
+}
+
+/**
+ * create_nat_device() - Create the NAT device
+ *
+ * Called during ipa init to create nat device
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int create_nat_device(void)
+{
+ struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+ int result;
+
+ IPADBG("\n");
+
+ mutex_lock(&nat_ctx->lock);
+ nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME);
+ if (IS_ERR(nat_ctx->class)) {
+ IPAERR("unable to create the class\n");
+ result = -ENODEV;
+ goto vaddr_alloc_fail;
+ }
+ result = alloc_chrdev_region(&nat_ctx->dev_num,
+ 0,
+ 1,
+ NAT_DEV_NAME);
+ if (result) {
+ IPAERR("alloc_chrdev_region err.\n");
+ result = -ENODEV;
+ goto alloc_chrdev_region_fail;
+ }
+
+ nat_ctx->dev =
+ device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+ "%s", NAT_DEV_NAME);
+
+ if (IS_ERR(nat_ctx->dev)) {
+ IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+ result = -ENODEV;
+ goto device_create_fail;
+ }
+
+ cdev_init(&nat_ctx->cdev, &ipa_nat_fops);
+ nat_ctx->cdev.owner = THIS_MODULE;
+ nat_ctx->cdev.ops = &ipa_nat_fops;
+
+ result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+ if (result) {
+ IPAERR("cdev_add err=%d\n", -result);
+ goto cdev_add_fail;
+ }
+ IPADBG("ipa nat dev added successful. major:%d minor:%d\n",
+ MAJOR(nat_ctx->dev_num),
+ MINOR(nat_ctx->dev_num));
+
+ nat_ctx->is_dev = true;
+ allocate_temp_nat_memory();
+ IPADBG("IPA NAT device created successfully\n");
+ result = 0;
+ goto bail;
+
+cdev_add_fail:
+ device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+ unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+ class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+ if (nat_ctx->vaddr) {
+ IPADBG("Releasing system memory\n");
+ dma_free_coherent(
+ ipa_ctx->pdev, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->vaddr = NULL;
+ nat_ctx->dma_handle = 0;
+ nat_ctx->size = 0;
+ }
+
+bail:
+ mutex_unlock(&nat_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa2_allocate_nat_device() - Allocates memory for the NAT device
+ * @mem: [in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+ struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ int result;
+
+ IPADBG("passed memory size %zu\n", mem->size);
+
+ mutex_lock(&nat_ctx->lock);
+ if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
+ IPAERR("Nat device name mismatch\n");
+ IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (nat_ctx->is_dev != true) {
+ IPAERR("Nat device not created successfully during boot up\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+	if (nat_ctx->is_dev_init == true) {
+		IPAERR("Device already init\n");
+		result = 0;
+		goto bail;
+	}
+
+	if (mem->size == 0) {
+		IPAERR("Invalid Parameters\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+ if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+ IPADBG("Allocating system memory\n");
+ nat_ctx->is_sys_mem = true;
+ nat_ctx->vaddr =
+ dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+ &nat_ctx->dma_handle, gfp_flags);
+ if (nat_ctx->vaddr == NULL) {
+ IPAERR("memory alloc failed\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ nat_ctx->size = mem->size;
+ } else {
+ IPADBG("using shared(local) memory\n");
+ nat_ctx->is_sys_mem = false;
+ }
+
+ nat_ctx->is_dev_init = true;
+ IPADBG("IPA NAT dev init successfully\n");
+ result = 0;
+
+bail:
+ mutex_unlock(&nat_ctx->lock);
+
+ return result;
+}
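+
+/*
+ * Usage sketch (illustrative only, compiled out): requesting NAT memory.
+ * A size above IPA_NAT_PHYS_MEM_SIZE selects DMA-coherent system memory;
+ * smaller requests use the IPA local (shared) memory region.
+ */
+#if 0
+static int nat_mem_setup(size_t tbl_size)
+{
+	struct ipa_ioc_nat_alloc_mem mem = { { 0 } };
+
+	strlcpy(mem.dev_name, NAT_DEV_NAME, IPA_RESOURCE_NAME_MAX);
+	mem.size = tbl_size;
+	return ipa2_allocate_nat_device(&mem);
+}
+#endif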
+
+/* IOCTL function handlers */
+/**
+ * ipa2_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+#define TBL_ENTRY_SIZE 32
+#define INDX_TBL_ENTRY_SIZE 4
+
+ struct ipa_register_write *reg_write_nop;
+ struct ipa_desc desc[2];
+ struct ipa_ip_v4_nat_init *cmd;
+ u16 size = sizeof(struct ipa_ip_v4_nat_init);
+ int result;
+ u32 offset = 0;
+ size_t tmp;
+
+ IPADBG("\n");
+ if (init->table_entries == 0) {
+ IPADBG("Table entries is zero\n");
+ return -EPERM;
+ }
+
+ /* check for integer overflow */
+ if (init->ipv4_rules_offset >
+ UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+ IPAERR("Detected overflow\n");
+ return -EPERM;
+ }
+ /* Check Table Entry offset is not
+ * beyond allocated size
+ */
+ tmp = init->ipv4_rules_offset +
+ (TBL_ENTRY_SIZE * (init->table_entries + 1));
+ if (tmp > ipa_ctx->nat_mem.size) {
+ IPAERR("Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+ init->ipv4_rules_offset, (init->table_entries + 1),
+ tmp, ipa_ctx->nat_mem.size);
+ return -EPERM;
+ }
+
+ /* check for integer overflow */
+ if (init->expn_rules_offset >
+ UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
+ IPAERR("Detected overflow\n");
+ return -EPERM;
+ }
+ /* Check Expn Table Entry offset is not
+ * beyond allocated size
+ */
+ tmp = init->expn_rules_offset +
+ (TBL_ENTRY_SIZE * init->expn_table_entries);
+ if (tmp > ipa_ctx->nat_mem.size) {
+ IPAERR("Expn Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+ init->expn_rules_offset, init->expn_table_entries,
+ tmp, ipa_ctx->nat_mem.size);
+ return -EPERM;
+ }
+
+ /* check for integer overflow */
+ if (init->index_offset >
+ UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+ IPAERR("Detected overflow\n");
+ return -EPERM;
+ }
+ /* Check Indx Table Entry offset is not
+ * beyond allocated size
+ */
+ tmp = init->index_offset +
+ (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
+ if (tmp > ipa_ctx->nat_mem.size) {
+ IPAERR("Indx Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+ init->index_offset, (init->table_entries + 1),
+ tmp, ipa_ctx->nat_mem.size);
+ return -EPERM;
+ }
+
+ /* check for integer overflow */
+ if (init->index_expn_offset >
+ UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
+ IPAERR("Detected overflow\n");
+ return -EPERM;
+ }
+ /* Check Expn Table entry offset is not
+ * beyond allocated size
+ */
+ tmp = init->index_expn_offset +
+ (INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
+ if (tmp > ipa_ctx->nat_mem.size) {
+ IPAERR("Indx Expn Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+ init->index_expn_offset, init->expn_table_entries,
+ tmp, ipa_ctx->nat_mem.size);
+ return -EPERM;
+ }
+
+ memset(&desc, 0, sizeof(desc));
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+ if (!reg_write_nop) {
+ IPAERR("no mem\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+
+ reg_write_nop->skip_pipeline_clear = 0;
+ reg_write_nop->value_mask = 0x0;
+
+ desc[0].opcode = IPA_REGISTER_WRITE;
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].callback = NULL;
+ desc[0].user1 = NULL;
+ desc[0].user2 = 0;
+ desc[0].pyld = (void *)reg_write_nop;
+ desc[0].len = sizeof(*reg_write_nop);
+
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("Failed to alloc immediate command object\n");
+ result = -ENOMEM;
+ goto free_nop;
+ }
+ if (ipa_ctx->nat_mem.vaddr) {
+ IPADBG("using system memory for nat table\n");
+ cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY;
+
+ offset = UINT_MAX - ipa_ctx->nat_mem.dma_handle;
+
+ if ((init->ipv4_rules_offset > offset) ||
+ (init->expn_rules_offset > offset) ||
+ (init->index_offset > offset) ||
+ (init->index_expn_offset > offset)) {
+ IPAERR("Failed due to integer overflow\n");
+ IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ &ipa_ctx->nat_mem.dma_handle);
+ IPAERR("ipv4_rules_offset: 0x%x\n",
+ init->ipv4_rules_offset);
+ IPAERR("expn_rules_offset: 0x%x\n",
+ init->expn_rules_offset);
+ IPAERR("index_offset: 0x%x\n",
+ init->index_offset);
+ IPAERR("index_expn_offset: 0x%x\n",
+ init->index_expn_offset);
+ result = -EPERM;
+ goto free_mem;
+ }
+ cmd->ipv4_rules_addr =
+ ipa_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+ IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+ cmd->ipv4_expansion_rules_addr =
+ ipa_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+ IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+ cmd->index_table_addr =
+ ipa_ctx->nat_mem.dma_handle + init->index_offset;
+ IPADBG("index_offset:0x%x\n", init->index_offset);
+
+ cmd->index_table_expansion_addr =
+ ipa_ctx->nat_mem.dma_handle + init->index_expn_offset;
+ IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+ } else {
+ IPADBG("using shared(local) memory for nat table\n");
+ cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY;
+
+ cmd->ipv4_rules_addr = init->ipv4_rules_offset +
+ IPA_RAM_NAT_OFST;
+
+ cmd->ipv4_expansion_rules_addr = init->expn_rules_offset +
+ IPA_RAM_NAT_OFST;
+
+ cmd->index_table_addr = init->index_offset +
+ IPA_RAM_NAT_OFST;
+
+ cmd->index_table_expansion_addr = init->index_expn_offset +
+ IPA_RAM_NAT_OFST;
+ }
+ cmd->table_index = init->tbl_index;
+ IPADBG("Table index:0x%x\n", cmd->table_index);
+ cmd->size_base_tables = init->table_entries;
+ IPADBG("Base Table size:0x%x\n", cmd->size_base_tables);
+ cmd->size_expansion_tables = init->expn_table_entries;
+ IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables);
+ cmd->public_ip_addr = init->ip_addr;
+ IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr);
+ desc[1].opcode = IPA_IP_V4_NAT_INIT;
+ desc[1].type = IPA_IMM_CMD_DESC;
+ desc[1].callback = NULL;
+ desc[1].user1 = NULL;
+ desc[1].user2 = 0;
+ desc[1].pyld = (void *)cmd;
+ desc[1].len = size;
+ IPADBG("posting v4 init command\n");
+ if (ipa_send_cmd(2, desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto free_mem;
+ }
+
+ ipa_ctx->nat_mem.public_ip_addr = init->ip_addr;
+	IPADBG("Table ip address:0x%x\n", ipa_ctx->nat_mem.public_ip_addr);
+
+ ipa_ctx->nat_mem.ipv4_rules_addr =
+ (char *)ipa_ctx->nat_mem.nat_base_address + init->ipv4_rules_offset;
+ IPADBG("ipv4_rules_addr: 0x%p\n",
+ ipa_ctx->nat_mem.ipv4_rules_addr);
+
+ ipa_ctx->nat_mem.ipv4_expansion_rules_addr =
+ (char *)ipa_ctx->nat_mem.nat_base_address + init->expn_rules_offset;
+ IPADBG("ipv4_expansion_rules_addr: 0x%p\n",
+ ipa_ctx->nat_mem.ipv4_expansion_rules_addr);
+
+ ipa_ctx->nat_mem.index_table_addr =
+ (char *)ipa_ctx->nat_mem.nat_base_address + init->index_offset;
+ IPADBG("index_table_addr: 0x%p\n",
+ ipa_ctx->nat_mem.index_table_addr);
+
+ ipa_ctx->nat_mem.index_table_expansion_addr =
+ (char *)ipa_ctx->nat_mem.nat_base_address + init->index_expn_offset;
+ IPADBG("index_table_expansion_addr: 0x%p\n",
+ ipa_ctx->nat_mem.index_table_expansion_addr);
+
+ IPADBG("size_base_tables: %d\n", init->table_entries);
+ ipa_ctx->nat_mem.size_base_tables = init->table_entries;
+
+ IPADBG("size_expansion_tables: %d\n", init->expn_table_entries);
+ ipa_ctx->nat_mem.size_expansion_tables = init->expn_table_entries;
+
+ IPADBG("return\n");
+ result = 0;
+free_mem:
+ kfree(cmd);
+free_nop:
+ kfree(reg_write_nop);
+bail:
+ return result;
+}
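+
+/*
+ * Usage sketch (illustrative only, compiled out): a minimal v4 NAT init
+ * request with the four regions laid out back to back inside the memory
+ * obtained via ipa2_allocate_nat_device(). Entry sizes mirror the checks
+ * above: 32 bytes per table rule, 4 bytes per index entry.
+ */
+#if 0
+static int nat_init_example(u32 public_ip, u16 entries, u16 expn_entries)
+{
+	struct ipa_ioc_v4_nat_init init = { 0 };
+
+	init.tbl_index = 0;
+	init.table_entries = entries;
+	init.expn_table_entries = expn_entries;
+	init.ipv4_rules_offset = 0;
+	init.expn_rules_offset = 32 * (entries + 1);
+	init.index_offset = init.expn_rules_offset + 32 * expn_entries;
+	init.index_expn_offset = init.index_offset + 4 * (entries + 1);
+	init.ip_addr = public_ip;
+	return ipa2_nat_init_cmd(&init);
+}
+#endif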
+
+/**
+ * ipa2_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+#define NUM_OF_DESC 2
+
+ struct ipa_register_write *reg_write_nop = NULL;
+ struct ipa_nat_dma *cmd = NULL;
+ struct ipa_desc *desc = NULL;
+ u16 size = 0, cnt = 0;
+ int ret = 0;
+
+ IPADBG("\n");
+	if (dma->entries == 0) {
+		IPAERR("Invalid number of commands %d\n",
+			dma->entries);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ size = sizeof(struct ipa_desc) * NUM_OF_DESC;
+ desc = kzalloc(size, GFP_KERNEL);
+ if (desc == NULL) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ size = sizeof(struct ipa_nat_dma);
+ cmd = kzalloc(size, GFP_KERNEL);
+ if (cmd == NULL) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+ if (!reg_write_nop) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ reg_write_nop->skip_pipeline_clear = 0;
+ reg_write_nop->value_mask = 0x0;
+
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].opcode = IPA_REGISTER_WRITE;
+ desc[0].callback = NULL;
+ desc[0].user1 = NULL;
+ desc[0].user2 = 0;
+ desc[0].len = sizeof(*reg_write_nop);
+ desc[0].pyld = (void *)reg_write_nop;
+
+ for (cnt = 0; cnt < dma->entries; cnt++) {
+ cmd->table_index = dma->dma[cnt].table_index;
+ cmd->base_addr = dma->dma[cnt].base_addr;
+ cmd->offset = dma->dma[cnt].offset;
+ cmd->data = dma->dma[cnt].data;
+
+ desc[1].type = IPA_IMM_CMD_DESC;
+ desc[1].opcode = IPA_NAT_DMA;
+ desc[1].callback = NULL;
+ desc[1].user1 = NULL;
+ desc[1].user2 = 0;
+ desc[1].len = sizeof(struct ipa_nat_dma);
+ desc[1].pyld = (void *)cmd;
+
+ ret = ipa_send_cmd(NUM_OF_DESC, desc);
+ if (ret == -EPERM)
+ IPAERR("Fail to send immediate command %d\n", cnt);
+ }
+
+bail:
+	/* kfree() is a no-op on NULL, so the calls need no guards */
+	kfree(cmd);
+	kfree(desc);
+	kfree(reg_write_nop);
+
+ return ret;
+}
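+
+/*
+ * Usage sketch (illustrative only, compiled out): patching a single NAT
+ * table entry. Field values are placeholders and the ipa_ioc_nat_dma_one
+ * layout is assumed from the msm_ipa UAPI header.
+ */
+#if 0
+static int nat_patch_entry(u32 entry_offset, u16 entry_data)
+{
+	struct ipa_ioc_nat_dma_cmd *cmd;
+	int ret;
+
+	cmd = kzalloc(sizeof(*cmd) + sizeof(cmd->dma[0]), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+	cmd->entries = 1;
+	cmd->dma[0].table_index = 0;
+	cmd->dma[0].base_addr = 0;
+	cmd->dma[0].offset = entry_offset;
+	cmd->dma[0].data = entry_data;
+	ret = ipa2_nat_dma_cmd(cmd);
+	kfree(cmd);
+	return ret;
+}
+#endif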
+
+/**
+ * ipa_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx: [in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx)
+{
+ IPADBG("\n");
+ mutex_lock(&nat_ctx->lock);
+
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("freeing the dma memory\n");
+ dma_free_coherent(
+ ipa_ctx->pdev, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->size = 0;
+ nat_ctx->vaddr = NULL;
+ }
+ nat_ctx->is_mapped = false;
+ nat_ctx->is_sys_mem = false;
+ nat_ctx->is_dev_init = false;
+
+ mutex_unlock(&nat_ctx->lock);
+ IPADBG("return\n");
+}
+
+/**
+ * ipa2_nat_del_cmd() - Delete a NAT table
+ * @del: [in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+ struct ipa_register_write *reg_write_nop;
+ struct ipa_desc desc[2];
+ struct ipa_ip_v4_nat_init *cmd;
+ u16 size = sizeof(struct ipa_ip_v4_nat_init);
+ u8 mem_type = IPA_NAT_SHARED_MEMORY;
+ u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+ int result;
+
+ IPADBG("\n");
+ if (ipa_ctx->nat_mem.is_tmp_mem) {
+ IPAERR("using temp memory during nat del\n");
+ mem_type = IPA_NAT_SYSTEM_MEMORY;
+ base_addr = ipa_ctx->nat_mem.tmp_dma_handle;
+ }
+
+ if (del->public_ip_addr == 0) {
+ IPADBG("Bad Parameter\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ memset(&desc, 0, sizeof(desc));
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+ if (!reg_write_nop) {
+ IPAERR("no mem\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+
+ reg_write_nop->skip_pipeline_clear = 0;
+ reg_write_nop->value_mask = 0x0;
+
+ desc[0].opcode = IPA_REGISTER_WRITE;
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].callback = NULL;
+ desc[0].user1 = NULL;
+ desc[0].user2 = 0;
+ desc[0].pyld = (void *)reg_write_nop;
+ desc[0].len = sizeof(*reg_write_nop);
+
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (cmd == NULL) {
+ IPAERR("Failed to alloc immediate command object\n");
+ result = -ENOMEM;
+ goto free_nop;
+ }
+ cmd->table_index = del->table_index;
+ cmd->ipv4_rules_addr = base_addr;
+ cmd->ipv4_rules_addr_type = mem_type;
+ cmd->ipv4_expansion_rules_addr = base_addr;
+ cmd->ipv4_expansion_rules_addr_type = mem_type;
+ cmd->index_table_addr = base_addr;
+ cmd->index_table_addr_type = mem_type;
+ cmd->index_table_expansion_addr = base_addr;
+ cmd->index_table_expansion_addr_type = mem_type;
+ cmd->size_base_tables = 0;
+ cmd->size_expansion_tables = 0;
+ cmd->public_ip_addr = 0;
+
+ desc[1].opcode = IPA_IP_V4_NAT_INIT;
+ desc[1].type = IPA_IMM_CMD_DESC;
+ desc[1].callback = NULL;
+ desc[1].user1 = NULL;
+ desc[1].user2 = 0;
+ desc[1].pyld = (void *)cmd;
+ desc[1].len = size;
+ if (ipa_send_cmd(2, desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto free_mem;
+ }
+
+ ipa_ctx->nat_mem.size_base_tables = 0;
+ ipa_ctx->nat_mem.size_expansion_tables = 0;
+ ipa_ctx->nat_mem.public_ip_addr = 0;
+ ipa_ctx->nat_mem.ipv4_rules_addr = 0;
+ ipa_ctx->nat_mem.ipv4_expansion_rules_addr = 0;
+ ipa_ctx->nat_mem.index_table_addr = 0;
+ ipa_ctx->nat_mem.index_table_expansion_addr = 0;
+
+ ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem);
+ IPADBG("return\n");
+ result = 0;
+free_mem:
+ kfree(cmd);
+free_nop:
+ kfree(reg_write_nop);
+bail:
+ return result;
+}
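+
+/*
+ * Usage sketch (illustrative only, compiled out): deleting the table that
+ * was set up with the public IP used at init time.
+ */
+#if 0
+static int nat_del_example(u32 public_ip)
+{
+	struct ipa_ioc_v4_nat_del del = { .table_index = 0,
+		.public_ip_addr = public_ip };
+
+	return ipa2_nat_del_cmd(&del);
+}
+#endif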
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
new file mode 100644
index 0000000..68cd7d5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -0,0 +1,1206 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/qmi_encdec.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/ipa.h>
+#include <linux/vmalloc.h>
+
+#include "ipa_qmi_service.h"
+#include "ipa_ram_mmap.h"
+#include "../ipa_common_i.h"
+
+#define IPA_Q6_SVC_VERS 1
+#define IPA_A5_SVC_VERS 1
+#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ)
+
+#define IPA_A5_SERVICE_SVC_ID 0x31
+#define IPA_A5_SERVICE_INS_ID 1
+#define IPA_Q6_SERVICE_SVC_ID 0x31
+#define IPA_Q6_SERVICE_INS_ID 2
+
+#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
+#define QMI_SEND_REQ_TIMEOUT_MS 60000
+
+static struct qmi_handle *ipa_svc_handle;
+static void ipa_a5_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, ipa_a5_svc_recv_msg);
+static struct workqueue_struct *ipa_svc_workqueue;
+static struct workqueue_struct *ipa_clnt_req_workqueue;
+static struct workqueue_struct *ipa_clnt_resp_workqueue;
+static void *curr_conn;
+static bool qmi_modem_init_fin, qmi_indication_fin;
+static uint32_t ipa_wan_platform;
+struct ipa_qmi_context *ipa_qmi_ctx;
+static bool first_time_handshake;
+static atomic_t workqueues_stopped;
+static atomic_t ipa_qmi_initialized;
+struct mutex ipa_qmi_lock;
+
+/* QMI A5 service */
+
+static struct msg_desc ipa_indication_reg_req_desc = {
+ .max_msg_len = QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01,
+ .ei_array = ipa_indication_reg_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa_indication_reg_resp_desc = {
+ .max_msg_len = QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INDICATION_REGISTER_RESP_V01,
+ .ei_array = ipa_indication_reg_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa_master_driver_complete_indication_desc = {
+ .max_msg_len = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01,
+ .ei_array = ipa_master_driver_init_complt_ind_msg_data_v01_ei,
+};
+static struct msg_desc ipa_install_fltr_rule_req_desc = {
+ .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01,
+ .ei_array = ipa_install_fltr_rule_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa_install_fltr_rule_resp_desc = {
+ .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01,
+ .ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa_filter_installed_notif_req_desc = {
+ .max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01,
+ .ei_array = ipa_fltr_installed_notif_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa_filter_installed_notif_resp_desc = {
+ .max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01,
+ .ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa_config_req_desc = {
+ .max_msg_len = QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_CONFIG_REQ_V01,
+ .ei_array = ipa_config_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa_config_resp_desc = {
+ .max_msg_len = QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_CONFIG_RESP_V01,
+ .ei_array = ipa_config_resp_msg_data_v01_ei,
+};
+
+static int handle_indication_req(void *req_h, void *req)
+{
+ struct ipa_indication_reg_req_msg_v01 *indication_req;
+ struct ipa_indication_reg_resp_msg_v01 resp;
+ struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+ int rc;
+
+ indication_req = (struct ipa_indication_reg_req_msg_v01 *)req;
+ IPAWANDBG("Received INDICATION Request\n");
+
+ memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01));
+ resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+ rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h,
+ &ipa_indication_reg_resp_desc, &resp, sizeof(resp));
+ qmi_indication_fin = true;
+	/* check whether an indication needs to be sent to the modem */
+ if (qmi_modem_init_fin) {
+ IPAWANDBG("send indication to modem (%d)\n",
+ qmi_modem_init_fin);
+ memset(&ind, 0, sizeof(struct
+ ipa_master_driver_init_complt_ind_msg_v01));
+ ind.master_driver_init_status.result =
+ IPA_QMI_RESULT_SUCCESS_V01;
+ rc = qmi_send_ind_from_cb(ipa_svc_handle, curr_conn,
+ &ipa_master_driver_complete_indication_desc,
+ &ind,
+ sizeof(ind));
+	} else {
+		IPAWANERR("not sending indication, modem init not finished\n");
+	}
+ return rc;
+}
+
+
+static int handle_install_filter_rule_req(void *req_h, void *req)
+{
+ struct ipa_install_fltr_rule_req_msg_v01 *rule_req;
+ struct ipa_install_fltr_rule_resp_msg_v01 resp;
+ uint32_t rule_hdl[MAX_NUM_Q6_RULE];
+ int rc = 0, i;
+
+ rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)req;
+ memset(rule_hdl, 0, sizeof(rule_hdl));
+ memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+ IPAWANDBG("Received install filter Request\n");
+
+	rc = copy_ul_filter_rule_to_ipa((struct
+		ipa_install_fltr_rule_req_msg_v01 *)req, rule_hdl);
+	if (rc)
+		IPAWANERR("copying UL rules from modem failed\n");
+
+ resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+ if (rule_req->filter_spec_list_valid == true) {
+ resp.filter_handle_list_valid = true;
+ if (rule_req->filter_spec_list_len > MAX_NUM_Q6_RULE) {
+ resp.filter_handle_list_len = MAX_NUM_Q6_RULE;
+ IPAWANERR("installed (%d) max Q6-UL rules ",
+ MAX_NUM_Q6_RULE);
+ IPAWANERR("but modem gives total (%u)\n",
+ rule_req->filter_spec_list_len);
+ } else {
+ resp.filter_handle_list_len =
+ rule_req->filter_spec_list_len;
+ }
+ } else {
+ resp.filter_handle_list_valid = false;
+ }
+
+ /* construct UL filter rules response to Modem*/
+ for (i = 0; i < resp.filter_handle_list_len; i++) {
+ resp.filter_handle_list[i].filter_spec_identifier =
+ rule_req->filter_spec_list[i].filter_spec_identifier;
+ resp.filter_handle_list[i].filter_handle = rule_hdl[i];
+ }
+
+ rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h,
+ &ipa_install_fltr_rule_resp_desc, &resp, sizeof(resp));
+
+ IPAWANDBG("Replied to install filter request\n");
+ return rc;
+}
+
+static int handle_filter_installed_notify_req(void *req_h, void *req)
+{
+ struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+ int rc = 0;
+
+ memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+ IPAWANDBG("Received filter_install_notify Request\n");
+ resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+ rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h,
+ &ipa_filter_installed_notif_resp_desc,
+ &resp, sizeof(resp));
+
+	IPAWANDBG("Responded to filter_install_notify Request\n");
+ return rc;
+}
+
+static int handle_ipa_config_req(void *req_h, void *req)
+{
+ struct ipa_config_resp_msg_v01 resp;
+ int rc;
+
+ memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01));
+ resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+ IPAWANDBG("Received IPA CONFIG Request\n");
+ rc = ipa_mhi_handle_ipa_config_req(
+ (struct ipa_config_req_msg_v01 *)req);
+ if (rc) {
+ IPAERR("ipa_mhi_handle_ipa_config_req failed %d\n", rc);
+ resp.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+ }
+ rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h,
+ &ipa_config_resp_desc,
+ &resp, sizeof(resp));
+	IPAWANDBG("Responded to IPA CONFIG Request\n");
+ return rc;
+}
+
+static int ipa_a5_svc_connect_cb(struct qmi_handle *handle,
+ void *conn_h)
+{
+ if (ipa_svc_handle != handle || !conn_h)
+ return -EINVAL;
+
+ if (curr_conn) {
+ IPAWANERR("Service is busy\n");
+ return -ECONNREFUSED;
+ }
+ curr_conn = conn_h;
+ return 0;
+}
+
+static int ipa_a5_svc_disconnect_cb(struct qmi_handle *handle,
+ void *conn_h)
+{
+ if (ipa_svc_handle != handle || curr_conn != conn_h)
+ return -EINVAL;
+
+ curr_conn = NULL;
+ return 0;
+}
+
+static int ipa_a5_svc_req_desc_cb(unsigned int msg_id,
+ struct msg_desc **req_desc)
+{
+ int rc;
+
+ switch (msg_id) {
+ case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+ *req_desc = &ipa_indication_reg_req_desc;
+ rc = sizeof(struct ipa_indication_reg_req_msg_v01);
+ break;
+
+ case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+ *req_desc = &ipa_install_fltr_rule_req_desc;
+ rc = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+ break;
+ case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+ *req_desc = &ipa_filter_installed_notif_req_desc;
+ rc = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+ break;
+ case QMI_IPA_CONFIG_REQ_V01:
+ *req_desc = &ipa_config_req_desc;
+ rc = sizeof(struct ipa_config_req_msg_v01);
+ break;
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+ return rc;
+}
+
+static int ipa_a5_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+ void *req_h, unsigned int msg_id, void *req)
+{
+ int rc;
+
+ if (ipa_svc_handle != handle || curr_conn != conn_h)
+ return -EINVAL;
+
+ switch (msg_id) {
+ case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+ rc = handle_indication_req(req_h, req);
+ break;
+ case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+ rc = handle_install_filter_rule_req(req_h, req);
+ rc = wwan_update_mux_channel_prop();
+ break;
+ case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+ rc = handle_filter_installed_notify_req(req_h, req);
+ break;
+ case QMI_IPA_CONFIG_REQ_V01:
+ rc = handle_ipa_config_req(req_h, req);
+ break;
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+ return rc;
+}
+
+static void ipa_a5_svc_recv_msg(struct work_struct *work)
+{
+ int rc;
+
+ do {
+		IPAWANDBG("Notified about a Receive Event\n");
+ rc = qmi_recv_msg(ipa_svc_handle);
+ } while (rc == 0);
+ if (rc != -ENOMSG)
+ IPAWANERR("Error receiving message\n");
+}
+
+static void qmi_ipa_a5_svc_ntfy(struct qmi_handle *handle,
+ enum qmi_event_type event, void *priv)
+{
+ switch (event) {
+ case QMI_RECV_MSG:
+ if (!atomic_read(&workqueues_stopped))
+ queue_delayed_work(ipa_svc_workqueue,
+ &work_recv_msg, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct qmi_svc_ops_options ipa_a5_svc_ops_options = {
+ .version = 1,
+ .service_id = IPA_A5_SERVICE_SVC_ID,
+ .service_vers = IPA_A5_SVC_VERS,
+ .service_ins = IPA_A5_SERVICE_INS_ID,
+ .connect_cb = ipa_a5_svc_connect_cb,
+ .disconnect_cb = ipa_a5_svc_disconnect_cb,
+ .req_desc_cb = ipa_a5_svc_req_desc_cb,
+ .req_cb = ipa_a5_svc_req_cb,
+};
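+
+/*
+ * These ops are handed to the kernel QMI infrastructure from
+ * ipa_qmi_service_init_worker(); a minimal sketch of the registration
+ * (error handling elided):
+ *
+ *	ipa_svc_handle = qmi_handle_create(qmi_ipa_a5_svc_ntfy, NULL);
+ *	rc = qmi_svc_register(ipa_svc_handle, &ipa_a5_svc_ops_options);
+ *
+ * Incoming requests are then routed req_desc_cb -> req_cb from the
+ * ipa_A7_svc workqueue.
+ */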
+
+/****************************************************/
+/* QMI A5 client -> Q6 */
+/****************************************************/
+static void ipa_q6_clnt_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg_client, ipa_q6_clnt_recv_msg);
+static void ipa_q6_clnt_svc_arrive(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_svc_arrive, ipa_q6_clnt_svc_arrive);
+static void ipa_q6_clnt_svc_exit(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_svc_exit, ipa_q6_clnt_svc_exit);
+/* Client port for QMI communication with Q6 over IPC Router */
+static struct qmi_handle *ipa_q6_clnt;
+static int ipa_q6_clnt_reset;
+
+static int ipa_check_qmi_response(int rc,
+ int req_id,
+ enum ipa_qmi_result_type_v01 result,
+ enum ipa_qmi_error_type_v01 error,
+ char *resp_type)
+{
+ if (rc < 0) {
+ if (rc == -ETIMEDOUT && ipa_rmnet_ctx.ipa_rmnet_ssr) {
+ IPAWANERR(
+ "Timeout for qmi request id %d\n", req_id);
+ return rc;
+ }
+ if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+ IPAWANERR(
+ "SSR while waiting for qmi request id %d\n", req_id);
+ return rc;
+ }
+ IPAWANERR("Error sending qmi request id %d, rc = %d\n",
+ req_id, rc);
+ return rc;
+ }
+ if (result != IPA_QMI_RESULT_SUCCESS_V01 &&
+ ipa_rmnet_ctx.ipa_rmnet_ssr) {
+		IPAWANERR(
+		"Got bad response from request id %d: result %d (error %d)\n",
+			req_id, result, error);
+ return result;
+ }
+ IPAWANDBG("Received %s successfully\n", resp_type);
+ return 0;
+}
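+
+/*
+ * Typical usage (a sketch based on the senders below): issue the
+ * request synchronously, then funnel both the transport return code
+ * and the QMI-level result through ipa_check_qmi_response():
+ *
+ *	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+ *			&resp_desc, &resp, sizeof(resp),
+ *			QMI_SEND_REQ_TIMEOUT_MS);
+ *	return ipa_check_qmi_response(rc, msg_id, resp.resp.result,
+ *			resp.resp.error, "resp type name");
+ *
+ * msg_id and the resp type string only identify the request in logs.
+ */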
+
+static int qmi_init_modem_send_sync_msg(void)
+{
+ struct ipa_init_modem_driver_req_msg_v01 req;
+ struct ipa_init_modem_driver_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+ u16 smem_restr_bytes = ipa2_get_smem_restr_bytes();
+
+ memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01));
+ memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01));
+
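+	/*
+	 * Every offset handed to Q6 below is shifted by the SMEM
+	 * restricted-bytes prefix, and the *_end offsets are inclusive,
+	 * hence the "- 1".
+	 */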
+ req.platform_type_valid = true;
+ req.platform_type = ipa_wan_platform;
+
+ req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0);
+ req.hdr_tbl_info.modem_offset_start =
+ IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes;
+ req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) +
+ smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1;
+
+ req.v4_route_tbl_info_valid = true;
+ req.v4_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v4_rt_ofst) +
+ smem_restr_bytes;
+ req.v4_route_tbl_info.num_indices = IPA_MEM_PART(v4_modem_rt_index_hi);
+ req.v6_route_tbl_info_valid = true;
+
+ req.v6_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v6_rt_ofst) +
+ smem_restr_bytes;
+ req.v6_route_tbl_info.num_indices = IPA_MEM_PART(v6_modem_rt_index_hi);
+
+ req.v4_filter_tbl_start_addr_valid = true;
+ req.v4_filter_tbl_start_addr =
+ IPA_MEM_PART(v4_flt_ofst) + smem_restr_bytes;
+
+ req.v6_filter_tbl_start_addr_valid = true;
+ req.v6_filter_tbl_start_addr =
+ IPA_MEM_PART(v6_flt_ofst) + smem_restr_bytes;
+
+ req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0);
+ req.modem_mem_info.block_start_addr =
+ IPA_MEM_PART(modem_ofst) + smem_restr_bytes;
+ req.modem_mem_info.size = IPA_MEM_PART(modem_size);
+
+ req.ctrl_comm_dest_end_pt_valid = true;
+ req.ctrl_comm_dest_end_pt =
+ ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+
+ req.hdr_proc_ctx_tbl_info_valid =
+ (IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0);
+ req.hdr_proc_ctx_tbl_info.modem_offset_start =
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes;
+ req.hdr_proc_ctx_tbl_info.modem_offset_end =
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst) +
+ IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1;
+
+ req.zip_tbl_info_valid = (IPA_MEM_PART(modem_comp_decomp_size) != 0);
+	req.zip_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_comp_decomp_ofst) + smem_restr_bytes;
+ req.zip_tbl_info.modem_offset_end =
+ IPA_MEM_PART(modem_comp_decomp_ofst) +
+ IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1;
+
+ if (!ipa_uc_loaded_check()) { /* First time boot */
+ req.is_ssr_bootup_valid = false;
+ req.is_ssr_bootup = 0;
+ } else { /* After SSR boot */
+ req.is_ssr_bootup_valid = true;
+ req.is_ssr_bootup = 1;
+ }
+
+ IPAWANDBG("platform_type %d\n", req.platform_type);
+ IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n",
+ req.hdr_tbl_info.modem_offset_start);
+ IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n",
+ req.hdr_tbl_info.modem_offset_end);
+ IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n",
+ req.v4_route_tbl_info.route_tbl_start_addr);
+ IPAWANDBG("v4_route_tbl_info.num_indices %d\n",
+ req.v4_route_tbl_info.num_indices);
+ IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n",
+ req.v6_route_tbl_info.route_tbl_start_addr);
+ IPAWANDBG("v6_route_tbl_info.num_indices %d\n",
+ req.v6_route_tbl_info.num_indices);
+ IPAWANDBG("v4_filter_tbl_start_addr %d\n",
+ req.v4_filter_tbl_start_addr);
+ IPAWANDBG("v6_filter_tbl_start_addr %d\n",
+ req.v6_filter_tbl_start_addr);
+ IPAWANDBG("modem_mem_info.block_start_addr %d\n",
+ req.modem_mem_info.block_start_addr);
+ IPAWANDBG("modem_mem_info.size %d\n",
+ req.modem_mem_info.size);
+ IPAWANDBG("ctrl_comm_dest_end_pt %d\n",
+ req.ctrl_comm_dest_end_pt);
+ IPAWANDBG("is_ssr_bootup %d\n",
+ req.is_ssr_bootup);
+
+ req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01;
+ req_desc.ei_array = ipa_init_modem_driver_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01;
+ resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei;
+
+ pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_REQ_TIMEOUT_MS);
+ pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n");
+ return ipa_check_qmi_response(rc,
+ QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_init_modem_driver_resp_msg_v01");
+}
+
+/* sending filter-install-request to modem */
+int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+ struct ipa_install_fltr_rule_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+	/* check if the filter rules from IPACM are valid */
+	if (req->filter_spec_list_len == 0) {
+		IPAWANDBG("IPACM passed zero rules to Q6\n");
+	} else {
+		IPAWANDBG("IPACM passed %u rules to Q6\n",
+			req->filter_spec_list_len);
+ }
+
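+	/*
+	 * Cache the request so it can be replayed to Q6 after modem SSR.
+	 * The cache is a ring of MAX_NUM_QMI_RULE_CACHE entries; once it
+	 * wraps, the oldest cached request is overwritten.
+	 */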
+ mutex_lock(&ipa_qmi_lock);
+ if (ipa_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(&(ipa_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+ ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+ req,
+ sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+ ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+		ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+ }
+ mutex_unlock(&ipa_qmi_lock);
+
+ req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
+ req_desc.ei_array = ipa_install_fltr_rule_req_msg_data_v01_ei;
+
+ memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+ resp_desc.max_msg_len =
+ QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
+ resp_desc.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+ req,
+ sizeof(struct ipa_install_fltr_rule_req_msg_v01),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_REQ_TIMEOUT_MS);
+ return ipa_check_qmi_response(rc,
+ QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_install_filter");
+}
+
+int qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+ struct ipa_enable_force_clear_datapath_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc = 0;
+
+ if (!req || !req->source_pipe_bitmask) {
+ IPAWANERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ req_desc.max_msg_len =
+ QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+ req_desc.ei_array = ipa_enable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(resp));
+ resp_desc.max_msg_len =
+ QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+ resp_desc.ei_array =
+ ipa_enable_force_clear_datapath_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt,
+ &req_desc,
+ req,
+ sizeof(*req),
+ &resp_desc, &resp, sizeof(resp), 0);
+ if (rc < 0) {
+ IPAWANERR("send req failed %d\n", rc);
+ return rc;
+ }
+ if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("enable_force_clear_datapath failed %d\n",
+			resp.resp.result);
+ return resp.resp.result;
+ }
+ IPAWANDBG("SUCCESS\n");
+ return rc;
+}
+
+int qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+ struct ipa_disable_force_clear_datapath_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc = 0;
+
+ if (!req) {
+ IPAWANERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ req_desc.max_msg_len =
+ QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+ req_desc.ei_array =
+ ipa_disable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(resp));
+ resp_desc.max_msg_len =
+ QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+ resp_desc.ei_array =
+ ipa_disable_force_clear_datapath_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt,
+ &req_desc,
+ req,
+ sizeof(*req),
+ &resp_desc, &resp, sizeof(resp), 0);
+ if (rc < 0) {
+ IPAWANERR("send req failed %d\n", rc);
+ return rc;
+ }
+ if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("disable_force_clear_datapath failed %d\n",
+			resp.resp.result);
+ return resp.resp.result;
+ }
+ IPAWANDBG("SUCCESS\n");
+ return rc;
+}
+
+/* sending filter-installed-notify-request to modem */
+int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+ struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc = 0, i = 0;
+
+	/* check if the filter rules from IPACM are valid */
+ if (req->filter_index_list_len == 0) {
+ IPAWANERR(" delete UL filter rule for pipe %d\n",
+ req->source_pipe_index);
+ return -EINVAL;
+ } else if (req->filter_index_list_len > QMI_IPA_MAX_FILTERS_V01) {
+		IPAWANERR(" UL filter rule for pipe %d exceeds max (%u)\n",
+ req->source_pipe_index,
+ req->filter_index_list_len);
+ return -EINVAL;
+ } else if (req->filter_index_list[0].filter_index == 0 &&
+ req->source_pipe_index !=
+ ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD)) {
+ IPAWANERR(" get index wrong for pipe %d\n",
+ req->source_pipe_index);
+ for (i = 0; i < req->filter_index_list_len; i++)
+			IPAWANERR(" entry %d: handle %d index %d\n",
+ i,
+ req->filter_index_list[i].filter_handle,
+ req->filter_index_list[i].filter_index);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_qmi_lock);
+ if (ipa_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(&(ipa_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+ ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+ req,
+ sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+ ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+		ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+ }
+ mutex_unlock(&ipa_qmi_lock);
+ req_desc.max_msg_len =
+ QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01;
+ req_desc.ei_array = ipa_fltr_installed_notif_req_msg_data_v01_ei;
+
+ memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+ resp_desc.max_msg_len =
+ QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
+ resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt,
+ &req_desc,
+ req,
+ sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_REQ_TIMEOUT_MS);
+ return ipa_check_qmi_response(rc,
+ QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_fltr_installed_notif_resp");
+}
+
+static void ipa_q6_clnt_recv_msg(struct work_struct *work)
+{
+ int rc;
+
+ do {
+		IPAWANDBG("Notified about a receive event\n");
+ rc = qmi_recv_msg(ipa_q6_clnt);
+ } while (rc == 0);
+	if (rc != -ENOMSG)
+		IPAWANERR("Error receiving message: %d\n", rc);
+}
+
+static void ipa_q6_clnt_notify(struct qmi_handle *handle,
+ enum qmi_event_type event, void *notify_priv)
+{
+ switch (event) {
+ case QMI_RECV_MSG:
+		IPAWANDBG("client qmi recv message called\n");
+ if (!atomic_read(&workqueues_stopped))
+ queue_delayed_work(ipa_clnt_resp_workqueue,
+ &work_recv_msg_client, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ipa_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
+ void *msg, unsigned int msg_len,
+ void *ind_cb_priv)
+{
+ struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+ struct msg_desc qmi_ind_desc;
+ int rc = 0;
+
+ if (handle != ipa_q6_clnt) {
+ IPAWANERR("Wrong client\n");
+ return;
+ }
+
+ if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) {
+ memset(&qmi_ind, 0, sizeof(
+ struct ipa_data_usage_quota_reached_ind_msg_v01));
+ qmi_ind_desc.max_msg_len =
+ QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01;
+ qmi_ind_desc.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01;
+ qmi_ind_desc.ei_array =
+ ipa_data_usage_quota_reached_ind_msg_data_v01_ei;
+
+ rc = qmi_kernel_decode(&qmi_ind_desc, &qmi_ind, msg, msg_len);
+ if (rc < 0) {
+ IPAWANERR("Error decoding msg_id %d\n", msg_id);
+ return;
+ }
+ IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
+ qmi_ind.apn.mux_id,
+ (unsigned long int) qmi_ind.apn.num_Mbytes);
+ ipa_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+ }
+}
+
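+/*
+ * Q6 service bring-up: create the client handle, connect to the Q6 QMI
+ * service, register for indications and send the modem-init request.
+ * If the local A5 service has already seen an indication-register
+ * request (qmi_indication_fin), the master-driver-init-complete
+ * indication is sent back to the modem from here as well.
+ */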
+static void ipa_q6_clnt_svc_arrive(struct work_struct *work)
+{
+ int rc;
+ struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+
+ /* Create a Local client port for QMI communication */
+ ipa_q6_clnt = qmi_handle_create(ipa_q6_clnt_notify, NULL);
+ if (!ipa_q6_clnt) {
+ IPAWANERR("QMI client handle alloc failed\n");
+ return;
+ }
+
+ IPAWANDBG("Lookup server name, get client-hdl(%p)\n",
+ ipa_q6_clnt);
+ rc = qmi_connect_to_service(ipa_q6_clnt,
+ IPA_Q6_SERVICE_SVC_ID,
+ IPA_Q6_SVC_VERS,
+ IPA_Q6_SERVICE_INS_ID);
+ if (rc < 0) {
+ IPAWANERR("Server not found\n");
+		ipa_q6_clnt_svc_exit(NULL);
+ return;
+ }
+
+ rc = qmi_register_ind_cb(ipa_q6_clnt, ipa_q6_clnt_ind_cb, NULL);
+ if (rc < 0)
+ IPAWANERR("Unable to register for indications\n");
+
+ ipa_q6_clnt_reset = 0;
+ IPAWANDBG("Q6 QMI service available now\n");
+ /* Initialize modem IPA-driver */
+ IPAWANDBG("send qmi_init_modem_send_sync_msg to modem\n");
+ rc = qmi_init_modem_send_sync_msg();
+ if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+ IPAWANERR("qmi_init_modem_send_sync_msg failed due to SSR!\n");
+ /* Cleanup will take place when ipa_wwan_remove is called */
+ return;
+ }
+ if (rc != 0) {
+ IPAWANERR("qmi_init_modem_send_sync_msg failed\n");
+ /*
+ * This is a very unexpected scenario, which requires a kernel
+ * panic in order to force dumps for QMI/Q6 side analysis.
+ */
+ BUG();
+ return;
+ }
+ qmi_modem_init_fin = true;
+
+ /* In cold-bootup, first_time_handshake = false */
+ ipa_q6_handshake_complete(first_time_handshake);
+ first_time_handshake = true;
+
+ IPAWANDBG("complete, qmi_modem_init_fin : %d\n",
+ qmi_modem_init_fin);
+
+ if (qmi_indication_fin) {
+ IPAWANDBG("send indication to modem (%d)\n",
+ qmi_indication_fin);
+ memset(&ind, 0, sizeof(struct
+ ipa_master_driver_init_complt_ind_msg_v01));
+ ind.master_driver_init_status.result =
+ IPA_QMI_RESULT_SUCCESS_V01;
+ rc = qmi_send_ind(ipa_svc_handle, curr_conn,
+ &ipa_master_driver_complete_indication_desc,
+ &ind,
+ sizeof(ind));
+ IPAWANDBG("ipa_qmi_service_client good\n");
+ } else {
+		IPAWANERR("not sending indication (%d)\n",
+			qmi_indication_fin);
+ }
+}
+
+static void ipa_q6_clnt_svc_exit(struct work_struct *work)
+{
+ mutex_lock(&ipa_qmi_lock);
+
+ if (ipa_q6_clnt)
+ qmi_handle_destroy(ipa_q6_clnt);
+ ipa_q6_clnt_reset = 1;
+ ipa_q6_clnt = NULL;
+
+ mutex_unlock(&ipa_qmi_lock);
+}
+
+static int ipa_q6_clnt_svc_event_notify(struct notifier_block *this,
+ unsigned long code,
+ void *_cmd)
+{
+ IPAWANDBG("event %ld\n", code);
+ switch (code) {
+ case QMI_SERVER_ARRIVE:
+ if (!atomic_read(&workqueues_stopped))
+ queue_delayed_work(ipa_clnt_req_workqueue,
+ &work_svc_arrive, 0);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block ipa_q6_clnt_nb = {
+ .notifier_call = ipa_q6_clnt_svc_event_notify,
+};
+
+static void ipa_qmi_service_init_worker(void)
+{
+ int rc;
+
+	/* Initialize QMI-service */
+	IPAWANDBG("Initializing IPA A7 QMI service\n");
+
+ /* start the QMI msg cache */
+ ipa_qmi_ctx = vzalloc(sizeof(*ipa_qmi_ctx));
+ if (!ipa_qmi_ctx) {
+		IPAWANERR("vzalloc err\n");
+ return;
+ }
+ ipa_qmi_ctx->modem_cfg_emb_pipe_flt =
+ ipa2_get_modem_cfg_emb_pipe_flt();
+
+ ipa_svc_workqueue = create_singlethread_workqueue("ipa_A7_svc");
+ if (!ipa_svc_workqueue) {
+ IPAWANERR("Creating ipa_A7_svc workqueue failed\n");
+ vfree(ipa_qmi_ctx);
+ ipa_qmi_ctx = NULL;
+ return;
+ }
+
+ ipa_svc_handle = qmi_handle_create(qmi_ipa_a5_svc_ntfy, NULL);
+ if (!ipa_svc_handle) {
+ IPAWANERR("Creating ipa_A7_svc qmi handle failed\n");
+ goto destroy_ipa_A7_svc_wq;
+ }
+
+ /*
+ * Setting the current connection to NULL, as due to a race between
+ * server and client clean-up in SSR, the disconnect_cb might not
+ * have necessarily been called
+ */
+ curr_conn = NULL;
+
+ rc = qmi_svc_register(ipa_svc_handle, &ipa_a5_svc_ops_options);
+ if (rc < 0) {
+ IPAWANERR("Registering ipa_a5 svc failed %d\n",
+ rc);
+ goto destroy_qmi_handle;
+ }
+
+ /* Initialize QMI-client */
+
+ ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req");
+ if (!ipa_clnt_req_workqueue) {
+ IPAWANERR("Creating clnt_req workqueue failed\n");
+ goto deregister_qmi_srv;
+ }
+
+ ipa_clnt_resp_workqueue = create_singlethread_workqueue("clnt_resp");
+ if (!ipa_clnt_resp_workqueue) {
+ IPAWANERR("Creating clnt_resp workqueue failed\n");
+ goto destroy_clnt_req_wq;
+ }
+
+ rc = qmi_svc_event_notifier_register(IPA_Q6_SERVICE_SVC_ID,
+ IPA_Q6_SVC_VERS,
+ IPA_Q6_SERVICE_INS_ID, &ipa_q6_clnt_nb);
+ if (rc < 0) {
+ IPAWANERR("notifier register failed\n");
+ goto destroy_clnt_resp_wq;
+ }
+
+ atomic_set(&ipa_qmi_initialized, 1);
+	/* wait for the Q6 service, then send the modem-init request */
+	IPAWANDBG("waiting for Q6 service to become available\n");
+ return;
+
+destroy_clnt_resp_wq:
+ destroy_workqueue(ipa_clnt_resp_workqueue);
+ ipa_clnt_resp_workqueue = NULL;
+destroy_clnt_req_wq:
+ destroy_workqueue(ipa_clnt_req_workqueue);
+ ipa_clnt_req_workqueue = NULL;
+deregister_qmi_srv:
+ qmi_svc_unregister(ipa_svc_handle);
+destroy_qmi_handle:
+ qmi_handle_destroy(ipa_svc_handle);
+	ipa_svc_handle = NULL;
+destroy_ipa_A7_svc_wq:
+ destroy_workqueue(ipa_svc_workqueue);
+ ipa_svc_workqueue = NULL;
+ vfree(ipa_qmi_ctx);
+ ipa_qmi_ctx = NULL;
+}
+
+int ipa_qmi_service_init(uint32_t wan_platform_type)
+{
+ ipa_wan_platform = wan_platform_type;
+ qmi_modem_init_fin = false;
+ qmi_indication_fin = false;
+ atomic_set(&workqueues_stopped, 0);
+
+	if (atomic_read(&ipa_qmi_initialized) == 0)
+ ipa_qmi_service_init_worker();
+ return 0;
+}
+
+void ipa_qmi_service_exit(void)
+{
+ int ret = 0;
+
+ atomic_set(&workqueues_stopped, 1);
+
+ /* qmi-service */
+ if (ipa_svc_handle) {
+ ret = qmi_svc_unregister(ipa_svc_handle);
+ if (ret < 0)
+ IPAWANERR("unregister qmi handle %p failed, ret=%d\n",
+ ipa_svc_handle, ret);
+ }
+ if (ipa_svc_workqueue) {
+ flush_workqueue(ipa_svc_workqueue);
+ destroy_workqueue(ipa_svc_workqueue);
+ ipa_svc_workqueue = NULL;
+ }
+
+ if (ipa_svc_handle) {
+ ret = qmi_handle_destroy(ipa_svc_handle);
+ if (ret < 0)
+ IPAWANERR("Error destroying qmi handle %p, ret=%d\n",
+ ipa_svc_handle, ret);
+ }
+	ipa_svc_handle = NULL;
+
+ /* qmi-client */
+
+ /* Unregister from events */
+ ret = qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID,
+ IPA_Q6_SVC_VERS,
+ IPA_Q6_SERVICE_INS_ID, &ipa_q6_clnt_nb);
+ if (ret < 0)
+ IPAWANERR(
+ "Error qmi_svc_event_notifier_unregister service %d, ret=%d\n",
+ IPA_Q6_SERVICE_SVC_ID, ret);
+
+ /* Release client handle */
+	ipa_q6_clnt_svc_exit(NULL);
+
+ if (ipa_clnt_req_workqueue) {
+ destroy_workqueue(ipa_clnt_req_workqueue);
+ ipa_clnt_req_workqueue = NULL;
+ }
+ if (ipa_clnt_resp_workqueue) {
+ destroy_workqueue(ipa_clnt_resp_workqueue);
+ ipa_clnt_resp_workqueue = NULL;
+ }
+
+ mutex_lock(&ipa_qmi_lock);
+ /* clean the QMI msg cache */
+ if (ipa_qmi_ctx != NULL) {
+ vfree(ipa_qmi_ctx);
+ ipa_qmi_ctx = NULL;
+ }
+ mutex_unlock(&ipa_qmi_lock);
+ qmi_modem_init_fin = false;
+ qmi_indication_fin = false;
+ atomic_set(&ipa_qmi_initialized, 0);
+}
+
+void ipa_qmi_stop_workqueues(void)
+{
+ IPAWANDBG("Stopping all QMI workqueues\n");
+
+ /* Stopping all workqueues so new work won't be scheduled */
+ atomic_set(&workqueues_stopped, 1);
+
+ /* Making sure that the current scheduled work won't be executed */
+ cancel_delayed_work(&work_recv_msg);
+ cancel_delayed_work(&work_recv_msg_client);
+ cancel_delayed_work(&work_svc_arrive);
+ cancel_delayed_work(&work_svc_exit);
+}
+
+/* voting for bus BW to ipa_rm */
+int vote_for_bus_bw(uint32_t *bw_mbps)
+{
+ struct ipa_rm_perf_profile profile;
+ int ret;
+
+ if (bw_mbps == NULL) {
+ IPAWANERR("Bus BW is invalid\n");
+ return -EINVAL;
+ }
+
+ memset(&profile, 0, sizeof(profile));
+ profile.max_supported_bandwidth_mbps = *bw_mbps;
+ ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+ &profile);
+ if (ret)
+ IPAWANERR("Failed to set perf profile to BW %u\n",
+ profile.max_supported_bandwidth_mbps);
+ else
+ IPAWANDBG("Succeeded to set perf profile to BW %u\n",
+ profile.max_supported_bandwidth_mbps);
+
+ return ret;
+}
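+
+/*
+ * Example (a sketch; the bandwidth value is illustrative only):
+ *
+ *	uint32_t bw_mbps = 100;
+ *
+ *	if (vote_for_bus_bw(&bw_mbps))
+ *		IPAWANERR("bus BW vote failed\n");
+ */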
+
+int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+ struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01;
+ req_desc.ei_array = ipa_get_data_stats_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_GET_DATA_STATS_RESP_V01;
+ resp_desc.ei_array = ipa_get_data_stats_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_get_data_stats_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_get_data_stats_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n");
+
+ return ipa_check_qmi_response(rc,
+ QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result,
+ resp->resp.error, "ipa_get_data_stats_resp_msg_v01");
+}
+
+int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+ struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01;
+ req_desc.ei_array = ipa_get_apn_data_stats_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01;
+ resp_desc.ei_array = ipa_get_apn_data_stats_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n");
+
+ return ipa_check_qmi_response(rc,
+ QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa_get_apn_data_stats_resp_msg_v01");
+}
+
+int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+ struct ipa_set_data_usage_quota_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01));
+
+ req_desc.max_msg_len = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01;
+ req_desc.ei_array = ipa_set_data_usage_quota_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01;
+ resp_desc.ei_array = ipa_set_data_usage_quota_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+ return ipa_check_qmi_response(rc,
+ QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_set_data_usage_quota_resp_msg_v01");
+}
+
+int ipa_qmi_stop_data_qouta(void)
+{
+ struct ipa_stop_data_usage_quota_req_msg_v01 req;
+ struct ipa_stop_data_usage_quota_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01));
+ memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01));
+
+ req_desc.max_msg_len =
+ QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01;
+ req_desc.ei_array = ipa_stop_data_usage_quota_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01;
+ resp_desc.ei_array = ipa_stop_data_usage_quota_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+ return ipa_check_qmi_response(rc,
+ QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_stop_data_usage_quota_resp_msg_v01");
+}
+
+void ipa_qmi_init(void)
+{
+ mutex_init(&ipa_qmi_lock);
+}
+
+void ipa_qmi_cleanup(void)
+{
+ mutex_destroy(&ipa_qmi_lock);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
new file mode 100644
index 0000000..7793fc0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
@@ -0,0 +1,280 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IPA_QMI_SERVICE_H
+#define IPA_QMI_SERVICE_H
+
+#include <linux/ipa.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "ipa_i.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+
+/**
+ * Names of the DL wwan default routing tables for v4 and v6
+ */
+#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr"
+#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt"
+#define MAX_NUM_Q6_RULE 35
+#define MAX_NUM_QMI_RULE_CACHE 10
+#define DEV_NAME "ipa-wan"
+#define SUBSYS_MODEM "modem"
+
+#define IPAWANDBG(fmt, args...) \
+ pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPAWANERR(fmt, args...) \
+ pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
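+
+/*
+ * Example: IPAWANDBG("rc = %d\n", rc) expands to a pr_debug() line of
+ * the form "ipa-wan <function>:<line> rc = 0", so every log entry
+ * carries the emitting function and line number.
+ */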
+
+extern struct ipa_qmi_context *ipa_qmi_ctx;
+extern struct mutex ipa_qmi_lock;
+
+struct ipa_qmi_context {
+	struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+	u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+	int num_ipa_install_fltr_rule_req_msg;
+	struct ipa_install_fltr_rule_req_msg_v01
+		ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	int num_ipa_fltr_installed_notif_req_msg;
+	struct ipa_fltr_installed_notif_req_msg_v01
+		ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	bool modem_cfg_emb_pipe_flt;
+};
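+
+/*
+ * The two *_cache[] arrays above are rings of MAX_NUM_QMI_RULE_CACHE
+ * entries: the senders (qmi_filter_request_send() and
+ * qmi_filter_notify_send()) copy each outgoing request into the next
+ * slot and advance the index modulo MAX_NUM_QMI_RULE_CACHE, keeping
+ * the most recent requests available for replay after modem SSR.
+ */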
+
+struct rmnet_mux_val {
+ uint32_t mux_id;
+ int8_t vchannel_name[IFNAMSIZ];
+ bool mux_channel_set;
+ bool ul_flt_reg;
+ bool mux_hdr_set;
+ uint32_t hdr_hdl;
+};
+
+extern struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[];
+extern struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_indication_reg_req_msg_data_v01_ei[];
+extern struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[];
+extern struct elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[];
+extern struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[];
+extern struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_config_req_msg_data_v01_ei[];
+extern struct elem_info ipa_config_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[];
+
+/**
+ * struct ipa_rmnet_context - IPA rmnet context
+ * @ipa_rmnet_ssr: support modem SSR
+ * @polling_interval: Requested interval for polling tethered statistics
+ * @metered_mux_id: The mux ID on which quota has been set
+ */
+struct ipa_rmnet_context {
+ bool ipa_rmnet_ssr;
+ u64 polling_interval;
+ u32 metered_mux_id;
+};
+
+extern struct ipa_rmnet_context ipa_rmnet_ctx;
+
+#ifdef CONFIG_RMNET_IPA
+
+int ipa_qmi_service_init(uint32_t wan_platform_type);
+
+void ipa_qmi_service_exit(void);
+
+/* sending filter-install-request to modem */
+int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req);
+
+/* sending filter-installed-notify-request to modem */
+int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req);
+
+/* voting for bus BW to ipa_rm */
+int vote_for_bus_bw(uint32_t *bw_mbps);
+
+int qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+int qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+ *rule_req, uint32_t *rule_hdl);
+
+int wwan_update_mux_channel_prop(void);
+
+int wan_ioctl_init(void);
+
+void wan_ioctl_stop_qmi_messages(void);
+
+void wan_ioctl_enable_qmi_messages(void);
+
+void wan_ioctl_deinit(void);
+
+void ipa_qmi_stop_workqueues(void);
+
+int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data);
+
+int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data);
+
+void ipa_broadcast_quota_reach_ind(uint32_t mux_id);
+
+int rmnet_ipa_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
+ *data);
+
+int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+ bool reset);
+
+int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+ struct ipa_get_data_stats_resp_msg_v01 *resp);
+
+int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+ struct ipa_get_apn_data_stats_resp_msg_v01 *resp);
+
+int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req);
+
+int ipa_qmi_stop_data_qouta(void);
+
+void ipa_q6_handshake_complete(bool ssr_bootup);
+
+void ipa_qmi_init(void);
+
+void ipa_qmi_cleanup(void);
+
+#else /* CONFIG_RMNET_IPA */
+
+static inline int ipa_qmi_service_init(uint32_t wan_platform_type)
+{
+ return -EPERM;
+}
+
+static inline void ipa_qmi_service_exit(void) { }
+
+/* sending filter-install-request to modem */
+static inline int qmi_filter_request_send(
+ struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+/* sending filter-installed-notify-request to modem */
+static inline int qmi_filter_notify_send(
+ struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+static inline int qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+static inline int qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+static inline int copy_ul_filter_rule_to_ipa(
+ struct ipa_install_fltr_rule_req_msg_v01 *rule_req, uint32_t *rule_hdl)
+{
+ return -EPERM;
+}
+
+static inline int wwan_update_mux_channel_prop(void)
+{
+ return -EPERM;
+}
+
+static inline int wan_ioctl_init(void)
+{
+ return -EPERM;
+}
+
+static inline void wan_ioctl_stop_qmi_messages(void) { }
+
+static inline void wan_ioctl_enable_qmi_messages(void) { }
+
+static inline void wan_ioctl_deinit(void) { }
+
+static inline void ipa_qmi_stop_workqueues(void) { }
+
+static inline int vote_for_bus_bw(uint32_t *bw_mbps)
+{
+ return -EPERM;
+}
+
+static inline int rmnet_ipa_poll_tethering_stats(
+ struct wan_ioctl_poll_tethering_stats *data)
+{
+ return -EPERM;
+}
+
+static inline int rmnet_ipa_set_data_quota(
+ struct wan_ioctl_set_data_quota *data)
+{
+ return -EPERM;
+}
+
+static inline void ipa_broadcast_quota_reach_ind(uint32_t mux_id) { }
+
+static inline int ipa_qmi_get_data_stats(
+ struct ipa_get_data_stats_req_msg_v01 *req,
+ struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+ return -EPERM;
+}
+
+static inline int ipa_qmi_get_network_stats(
+ struct ipa_get_apn_data_stats_req_msg_v01 *req,
+ struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+ return -EPERM;
+}
+
+static inline int ipa_qmi_set_data_quota(
+ struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+static inline int ipa_qmi_stop_data_qouta(void)
+{
+ return -EPERM;
+}
+
+static inline void ipa_q6_handshake_complete(bool ssr_bootup) { }
+
+static inline void ipa_qmi_init(void)
+{
+}
+
+static inline void ipa_qmi_cleanup(void)
+{
+}
+
+#endif /* CONFIG_RMNET_IPA */
+
+#endif /* IPA_QMI_SERVICE_H */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
new file mode 100644
index 0000000..dd59140
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
@@ -0,0 +1,2366 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+/* Type Definitions */
+static struct elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_hdr_tbl_info_type_v01,
+ modem_offset_start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_hdr_tbl_info_type_v01,
+ modem_offset_end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
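+
+/*
+ * Each elem_info table describes, field by field, how a C struct maps
+ * to QMI TLV wire format and is terminated by a QMI_EOTI entry. A
+ * minimal sketch for a hypothetical two-field struct (the names here
+ * are illustrative and not part of this driver):
+ *
+ *	struct example_type_v01 {
+ *		uint8_t id;
+ *		uint32_t value;
+ *	};
+ *
+ *	static struct elem_info example_type_data_v01_ei[] = {
+ *		{
+ *			.data_type = QMI_UNSIGNED_1_BYTE,
+ *			.elem_len = 1,
+ *			.elem_size = sizeof(uint8_t),
+ *			.is_array = NO_ARRAY,
+ *			.tlv_type = QMI_COMMON_TLV_TYPE,
+ *			.offset = offsetof(struct example_type_v01, id),
+ *		},
+ *		{
+ *			.data_type = QMI_UNSIGNED_4_BYTE,
+ *			.elem_len = 1,
+ *			.elem_size = sizeof(uint32_t),
+ *			.is_array = NO_ARRAY,
+ *			.tlv_type = QMI_COMMON_TLV_TYPE,
+ *			.offset = offsetof(struct example_type_v01, value),
+ *		},
+ *		{
+ *			.data_type = QMI_EOTI,
+ *			.is_array = NO_ARRAY,
+ *			.tlv_type = QMI_COMMON_TLV_TYPE,
+ *		},
+ *	};
+ */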
+
+static struct elem_info ipa_route_tbl_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_route_tbl_info_type_v01,
+ route_tbl_start_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_route_tbl_info_type_v01,
+ num_indices),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_modem_mem_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_modem_mem_info_type_v01,
+ block_start_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_modem_mem_info_type_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+ modem_offset_start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+ modem_offset_end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_zip_tbl_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_zip_tbl_info_type_v01,
+ modem_offset_start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_zip_tbl_info_type_v01,
+ modem_offset_end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_range_eq_16_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_range_eq_16_type_v01,
+ range_low),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_range_eq_16_type_v01,
+ range_high),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_32_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_32_type_v01,
+ mask),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_32_type_v01,
+ value),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_eq_16_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ipfltr_eq_16_type_v01,
+ value),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01,
+ value),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_128_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 16,
+ .elem_size = sizeof(uint8_t),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_128_type_v01,
+ mask),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 16,
+ .elem_size = sizeof(uint8_t),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_128_type_v01,
+ value),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_filter_rule_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_type_v01,
+ rule_eq_bitmap),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_type_v01,
+ tos_eq_present),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ tos_eq),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ protocol_eq_present),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ protocol_eq),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ num_ihl_offset_range_16),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01,
+ .elem_size = sizeof(
+ struct ipa_ipfltr_range_eq_16_type_v01),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_range_16),
+ .ei_array = ipa_ipfltr_range_eq_16_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ num_offset_meq_32),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01,
+ .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ offset_meq_32),
+ .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ tc_eq_present),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ tc_eq),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ flow_eq_present),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ flow_eq),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_eq_16_present),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_ipfltr_eq_16_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_eq_16),
+ .ei_array = ipa_ipfltr_eq_16_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_eq_32_present),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_ipfltr_eq_32_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_eq_32),
+ .ei_array = ipa_ipfltr_eq_32_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ num_ihl_offset_meq_32),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01,
+ .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_meq_32),
+ .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ num_offset_meq_128),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len =
+ QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01,
+ .elem_size = sizeof(
+ struct ipa_ipfltr_mask_eq_128_type_v01),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_type_v01,
+ offset_meq_128),
+ .ei_array = ipa_ipfltr_mask_eq_128_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ metadata_meq32_present),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ metadata_meq32),
+ .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ipv4_frag_eq_present),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_filter_spec_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ filter_spec_identifier),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ ip_type),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_filter_rule_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ filter_rule),
+ .ei_array = ipa_filter_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ filter_action),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ is_routing_table_index_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ route_table_index),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ is_mux_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info
+ ipa_filter_rule_identifier_to_handle_map_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_identifier_to_handle_map_v01,
+ filter_spec_identifier),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_identifier_to_handle_map_v01,
+ filter_handle),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_handle_to_index_map_v01,
+ filter_handle),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_handle_to_index_map_v01,
+ filter_index),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ platform_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ platform_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ hdr_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_hdr_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ hdr_tbl_info),
+ .ei_array = ipa_hdr_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_route_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_route_tbl_info),
+ .ei_array = ipa_route_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_route_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_route_tbl_info),
+ .ei_array = ipa_route_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_filter_tbl_start_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_filter_tbl_start_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_filter_tbl_start_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_filter_tbl_start_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ modem_mem_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_modem_mem_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ modem_mem_info),
+ .ei_array = ipa_modem_mem_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ is_ssr_bootup_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ is_ssr_bootup),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ hdr_proc_ctx_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(
+ struct ipa_hdr_proc_ctx_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ hdr_proc_ctx_tbl_info),
+ .ei_array = ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ zip_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_zip_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ zip_tbl_info),
+ .ei_array = ipa_zip_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
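
Each optional field in these tables follows a fixed two-entry pattern: a
QMI_OPT_FLAG entry covering the <field>_valid byte, then a payload entry
that shares the same tlv_type. A minimal sketch of that pattern using a
hypothetical message (the struct and field names below are illustrative,
not taken from this patch):

struct example_req_msg_v01 {
	uint8_t foo_valid;	/* gates emission of the foo TLV */
	uint32_t foo;
};

static struct elem_info example_req_msg_data_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,	/* reads/writes foo_valid */
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,	/* optional TLV ids start at 0x10 */
		.offset = offsetof(struct example_req_msg_v01, foo_valid),
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,	/* the payload */
		.elem_len = 1,
		.elem_size = sizeof(uint32_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,	/* same id pairs it with its flag */
		.offset = offsetof(struct example_req_msg_v01, foo),
	},
	{
		.data_type = QMI_EOTI,	/* table terminator */
		.is_array = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
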
+
+struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ default_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ default_end_pt),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_indication_reg_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_indication_reg_req_msg_v01,
+ master_driver_init_complete_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_indication_reg_req_msg_v01,
+ master_driver_init_complete),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_indication_reg_req_msg_v01,
+ data_usage_quota_reached_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_indication_reg_req_msg_v01,
+ data_usage_quota_reached),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_indication_reg_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
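
These ei tables are not used directly; each is bound to its message id and
maximum encoded length through a descriptor consumed by the kernel QMI
encode/decode library. A hedged sketch (the msg_id and max-length constants
are assumed to come from the matching ipa_qmi_service headers; exact names
may differ by kernel version):

static struct msg_desc ipa_indication_reg_req_desc = {
	.max_msg_len = QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01,
	.msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01,
	.ei_array = ipa_indication_reg_req_msg_data_v01_ei,
};
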
+
+struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ ipa_master_driver_init_complt_ind_msg_v01,
+ master_driver_init_status),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ filter_spec_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ filter_spec_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(struct ipa_filter_spec_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ filter_spec_list),
+ .ei_array = ipa_filter_spec_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ source_pipe_index_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ source_pipe_index),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ num_ipv4_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ num_ipv4_filters),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ num_ipv6_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ num_ipv6_filters),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ xlat_filter_indices_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ xlat_filter_indices_list_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ xlat_filter_indices_list),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
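
Variable-length lists use a three-entry pattern: the _valid flag, a
QMI_DATA_LEN entry that carries <field>_len on the wire, and a
VAR_LEN_ARRAY payload whose element count is bounded by elem_len
(QMI_IPA_MAX_FILTERS_V01 here). A sketch of filling such a request; the
field and enum names are assumptions based on the v01 header, not verified
against it:

static void example_fill_two_filters(
	struct ipa_install_fltr_rule_req_msg_v01 *req)
{
	memset(req, 0, sizeof(*req));
	req->filter_spec_list_valid = 1;
	req->filter_spec_list_len = 2;	/* encoder emits two elements */
	req->filter_spec_list[0].ip_type = QMI_IPA_IP_TYPE_V4_V01;
	req->filter_spec_list[1].ip_type = QMI_IPA_IP_TYPE_V6_V01;
}
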
+
+struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ filter_handle_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ filter_handle_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(
+ struct ipa_filter_rule_identifier_to_handle_map_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ filter_handle_list),
+ .ei_array =
+ ipa_filter_rule_identifier_to_handle_map_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ source_pipe_index),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ install_status),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ filter_index_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(
+ struct ipa_filter_handle_to_index_map_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ filter_index_list),
+ .ei_array = ipa_filter_handle_to_index_map_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ embedded_pipe_index_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ embedded_pipe_index),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ retain_header_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ retain_header),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ embedded_call_mux_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ embedded_call_mux_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ num_ipv4_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ num_ipv4_filters),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ num_ipv6_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ num_ipv6_filters),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ start_ipv4_filter_idx_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ start_ipv4_filter_idx),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ start_ipv6_filter_idx_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ start_ipv6_filter_idx),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_req_msg_v01,
+ source_pipe_bitmask),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_req_msg_v01,
+ request_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_req_msg_v01,
+ throttle_source_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_req_msg_v01,
+ throttle_source),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_disable_force_clear_datapath_req_msg_v01,
+ request_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_disable_force_clear_datapath_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_config_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ peripheral_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ peripheral_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ hw_deaggr_supported_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ hw_deaggr_supported),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ max_aggr_frame_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ max_aggr_frame_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ipa_ingress_pipe_mode_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ipa_ingress_pipe_mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ peripheral_speed_info_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ peripheral_speed_info),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_time_limit_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_time_limit),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_pkt_limit_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_pkt_limit),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_byte_limit_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_byte_limit),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_accumulation_time_limit_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_accumulation_time_limit),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ hw_control_flags_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ hw_control_flags),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_msi_event_threshold_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_msi_event_threshold),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_msi_event_threshold_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_msi_event_threshold),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_config_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_config_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_get_data_stats_req_msg_v01,
+ ipa_stats_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_data_stats_req_msg_v01,
+ reset_stats_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_data_stats_req_msg_v01,
+ reset_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_pipe_stats_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ pipe_index),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ num_ipv4_packets),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ num_ipv4_bytes),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ num_ipv6_packets),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ num_ipv6_bytes),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
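
Struct-valued members recurse: a QMI_STRUCT entry hands its bytes to the
.ei_array of the member type, which is why member tables such as
ipa_pipe_stats_info_type_data_v01_ei use QMI_COMMON_TLV_TYPE; struct
members carry no TLV header of their own. A sketch with a hypothetical
wrapper message:

struct example_stats_msg_v01 {	/* hypothetical, for illustration */
	struct ipa_pipe_stats_info_type_v01 stats;
};

static struct elem_info example_stats_msg_data_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct ipa_pipe_stats_info_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,	/* TLV id at the message level */
		.offset = offsetof(struct example_stats_msg_v01, stats),
		.ei_array = ipa_pipe_stats_info_type_data_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.is_array = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
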
+
+static struct elem_info ipa_stats_type_filter_rule_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_stats_type_filter_rule_v01,
+ filter_rule_index),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_stats_type_filter_rule_v01,
+ num_packets),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ipa_stats_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ipa_stats_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ul_src_pipe_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ul_src_pipe_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_PIPES_V01,
+ .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ul_src_pipe_stats_list),
+ .ei_array = ipa_pipe_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_dst_pipe_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_dst_pipe_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_PIPES_V01,
+ .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_dst_pipe_stats_list),
+ .ei_array = ipa_pipe_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_filter_rule_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_filter_rule_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(struct ipa_stats_type_filter_rule_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_filter_rule_stats_list),
+ .ei_array = ipa_stats_type_filter_rule_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ num_ul_packets),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ num_ul_bytes),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ num_dl_packets),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ num_dl_bytes),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_req_msg_v01,
+ mux_id_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_req_msg_v01,
+ mux_id_list_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_APN_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_req_msg_v01,
+ mux_id_list),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_resp_msg_v01,
+ apn_data_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_resp_msg_v01,
+ apn_data_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_APN_V01,
+ .elem_size = sizeof(struct
+ ipa_apn_data_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_resp_msg_v01,
+ apn_data_stats_list),
+ .ei_array = ipa_apn_data_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_data_usage_quota_info_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_data_usage_quota_info_type_v01,
+ num_Mbytes),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_set_data_usage_quota_req_msg_v01,
+ apn_quota_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_set_data_usage_quota_req_msg_v01,
+ apn_quota_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_APN_V01,
+ .elem_size = sizeof(struct
+ ipa_data_usage_quota_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_set_data_usage_quota_req_msg_v01,
+ apn_quota_list),
+ .ei_array = ipa_data_usage_quota_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_set_data_usage_quota_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct
+ ipa_data_usage_quota_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_data_usage_quota_reached_ind_msg_v01,
+ apn),
+ .ei_array = ipa_data_usage_quota_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[] = {
+ /* ipa_stop_data_usage_quota_req_msg is empty */
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_stop_data_usage_quota_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h
new file mode 100644
index 0000000..56ada21b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h
@@ -0,0 +1,560 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RAM_MMAP_H_
+#define _IPA_RAM_MMAP_H_
+
+/*
+ * This header defines the memory map of the IPA RAM. Note that not all
+ * of the SRAM is available for SW use; where leading bytes are
+ * restricted, the actual start address is advanced by the number of
+ * restricted bytes.
+ */
+
+#define IPA_RAM_NAT_OFST 0
+#define IPA_RAM_NAT_SIZE 0
+
+#define IPA_MEM_v1_RAM_HDR_OFST (IPA_RAM_NAT_OFST + IPA_RAM_NAT_SIZE)
+#define IPA_MEM_v1_RAM_HDR_SIZE 1664
+#define IPA_MEM_v1_RAM_V4_FLT_OFST (IPA_MEM_v1_RAM_HDR_OFST +\
+ IPA_MEM_v1_RAM_HDR_SIZE)
+#define IPA_MEM_v1_RAM_V4_FLT_SIZE 2176
+#define IPA_MEM_v1_RAM_V4_RT_OFST (IPA_MEM_v1_RAM_V4_FLT_OFST +\
+ IPA_MEM_v1_RAM_V4_FLT_SIZE)
+#define IPA_MEM_v1_RAM_V4_RT_SIZE 512
+#define IPA_MEM_v1_RAM_V6_FLT_OFST (IPA_MEM_v1_RAM_V4_RT_OFST +\
+ IPA_MEM_v1_RAM_V4_RT_SIZE)
+#define IPA_MEM_v1_RAM_V6_FLT_SIZE 1792
+#define IPA_MEM_v1_RAM_V6_RT_OFST (IPA_MEM_v1_RAM_V6_FLT_OFST +\
+ IPA_MEM_v1_RAM_V6_FLT_SIZE)
+#define IPA_MEM_v1_RAM_V6_RT_SIZE 512
+#define IPA_MEM_v1_RAM_END_OFST (IPA_MEM_v1_RAM_V6_RT_OFST +\
+ IPA_MEM_v1_RAM_V6_RT_SIZE)
+
+#define IPA_MEM_RAM_V6_RT_SIZE_DDR 16384
+#define IPA_MEM_RAM_V4_RT_SIZE_DDR 16384
+#define IPA_MEM_RAM_V6_FLT_SIZE_DDR 16384
+#define IPA_MEM_RAM_V4_FLT_SIZE_DDR 16384
+#define IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR 0
+
+#define IPA_MEM_CANARY_SIZE 4
+#define IPA_MEM_CANARY_VAL 0xdeadbeef
+
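
The canary words are seeded at known offsets so later dumps can detect a
region overrun. A hedged sketch of seeding one canary over SRAM direct
access (ipa_write_reg(), ipa_ctx->mmio and the v2.0 access macro are
assumed from elsewhere in this driver; the real init path may differ):

static void example_seed_canary(u32 sram_word_index)
{
	/* each DIRECT_ACCESS_N register windows one 32-bit SRAM word */
	ipa_write_reg(ipa_ctx->mmio,
		      IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(sram_word_index),
		      IPA_MEM_CANARY_VAL);
}
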
+#define IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE 256
+/*
+ * IPA v2.0 and v2.1 SRAM memory layout:
+ * +-------------+
+ * | V4 FLT HDR |
+ * +-------------+
+ * | CANARY |
+ * +-------------+
+ * | CANARY |
+ * +-------------+
+ * | V6 FLT HDR |
+ * +-------------+
+ * | CANARY |
+ * +-------------+
+ * | CANARY |
+ * +-------------+
+ * | V4 RT HDR |
+ * +-------------+
+ * | CANARY |
+ * +-------------+
+ * | V6 RT HDR |
+ * +-------------+
+ * | CANARY |
+ * +-------------+
+ * | MODEM HDR |
+ * +-------------+
+ * | APPS HDR |
+ * +-------------+
+ * | CANARY |
+ * +-------------+
+ * | MODEM MEM |
+ * +-------------+
+ * | CANARY |
+ * +-------------+
+ * | APPS V4 FLT |
+ * +-------------+
+ * | APPS V6 FLT |
+ * +-------------+
+ * | CANARY |
+ * +-------------+
+ * | UC INFO |
+ * +-------------+
+ */
+#define IPA_MEM_v2_RAM_OFST_START 128
+#define IPA_MEM_v2_RAM_V4_FLT_OFST IPA_MEM_v2_RAM_OFST_START
+#define IPA_MEM_v2_RAM_V4_FLT_SIZE 88
+
+/* V4 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_V4_FLT_OFST & 7)
+#error V4 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_V6_FLT_OFST (IPA_MEM_v2_RAM_V4_FLT_OFST + \
+ IPA_MEM_v2_RAM_V4_FLT_SIZE + 2*IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_V6_FLT_SIZE 88
+
+/* V6 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_V6_FLT_OFST & 7)
+#error V6 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_V4_RT_OFST (IPA_MEM_v2_RAM_V6_FLT_OFST + \
+ IPA_MEM_v2_RAM_V6_FLT_SIZE + 2*IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_V4_NUM_INDEX 11
+#define IPA_MEM_v2_V4_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_V4_MODEM_RT_INDEX_HI 3
+#define IPA_MEM_v2_V4_APPS_RT_INDEX_LO 4
+#define IPA_MEM_v2_V4_APPS_RT_INDEX_HI 10
+#define IPA_MEM_v2_RAM_V4_RT_SIZE (IPA_MEM_v2_RAM_V4_NUM_INDEX * 4)
+
+/* V4 routing header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_V4_RT_OFST & 7)
+#error V4 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_V6_RT_OFST (IPA_MEM_v2_RAM_V4_RT_OFST + \
+ IPA_MEM_v2_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_V6_NUM_INDEX 11
+#define IPA_MEM_v2_V6_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_V6_MODEM_RT_INDEX_HI 3
+#define IPA_MEM_v2_V6_APPS_RT_INDEX_LO 4
+#define IPA_MEM_v2_V6_APPS_RT_INDEX_HI 10
+#define IPA_MEM_v2_RAM_V6_RT_SIZE (IPA_MEM_v2_RAM_V6_NUM_INDEX * 4)
+
+/* V6 routing header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_V6_RT_OFST & 7)
+#error V6 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_MODEM_HDR_OFST (IPA_MEM_v2_RAM_V6_RT_OFST + \
+ IPA_MEM_v2_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_MODEM_HDR_SIZE 320
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_MODEM_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_APPS_HDR_OFST (IPA_MEM_v2_RAM_MODEM_HDR_OFST + \
+ IPA_MEM_v2_RAM_MODEM_HDR_SIZE)
+#define IPA_MEM_v2_RAM_APPS_HDR_SIZE 72
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_APPS_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_MODEM_OFST (IPA_MEM_v2_RAM_APPS_HDR_OFST + \
+ IPA_MEM_v2_RAM_APPS_HDR_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_MODEM_SIZE 3532
+
+/* modem memory is 4B aligned */
+#if (IPA_MEM_v2_RAM_MODEM_OFST & 3)
+#error modem memory is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_RAM_MODEM_OFST + \
+ IPA_MEM_v2_RAM_MODEM_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE 1920
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_RAM_APPS_V4_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_APPS_V6_FLT_OFST (IPA_MEM_v2_RAM_APPS_V4_FLT_OFST + \
+ IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE)
+#define IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE 1372
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_RAM_APPS_V6_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_UC_INFO_OFST (IPA_MEM_v2_RAM_APPS_V6_FLT_OFST + \
+ IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_UC_INFO_SIZE 292
+
+/* uC info is 4B aligned */
+#if (IPA_MEM_v2_RAM_UC_INFO_OFST & 3)
+#error uC info is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_END_OFST (IPA_MEM_v2_RAM_UC_INFO_OFST + \
+ IPA_MEM_v2_RAM_UC_INFO_SIZE)
+#define IPA_MEM_v2_RAM_APPS_V4_RT_OFST IPA_MEM_v2_RAM_END_OFST
+#define IPA_MEM_v2_RAM_APPS_V4_RT_SIZE 0
+#define IPA_MEM_v2_RAM_APPS_V6_RT_OFST IPA_MEM_v2_RAM_END_OFST
+#define IPA_MEM_v2_RAM_APPS_V6_RT_SIZE 0
+#define IPA_MEM_v2_RAM_HDR_SIZE_DDR 4096
+
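Each region's *_OFST macro chains off the previous region plus any canary
gap, so the v2.0/v2.1 map above can be audited by hand (byte offsets from
SRAM base; each result satisfies its #if alignment guard):

    V4_FLT_OFST      = 128
    V6_FLT_OFST      = 128 + 88 + 2*4   = 224
    V4_RT_OFST       = 224 + 88 + 2*4   = 320
    V6_RT_OFST       = 320 + 11*4 + 4   = 368
    MODEM_HDR_OFST   = 368 + 11*4 + 4   = 416
    APPS_HDR_OFST    = 416 + 320        = 736
    MODEM_OFST       = 736 + 72 + 4     = 812
    APPS_V4_FLT_OFST = 812 + 3532 + 4   = 4348
    APPS_V6_FLT_OFST = 4348 + 1920      = 6268
    UC_INFO_OFST     = 6268 + 1372 + 4  = 7644
    END_OFST         = 7644 + 292       = 7936
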
+/*
+ * IPA v2.5/v2.6 SRAM memory layout:
+ * +----------------+
+ * | UC INFO |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | V4 FLT HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | V6 FLT HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | V4 RT HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | V6 RT HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | MODEM HDR |
+ * +----------------+
+ * | APPS HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | MODEM PROC CTX |
+ * +----------------+
+ * | APPS PROC CTX |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | MODEM MEM |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ */
+
+#define IPA_MEM_v2_5_RAM_UC_MEM_SIZE 128
+#define IPA_MEM_v2_5_RAM_UC_INFO_OFST IPA_MEM_v2_5_RAM_UC_MEM_SIZE
+#define IPA_MEM_v2_5_RAM_UC_INFO_SIZE 512
+
+/* uC info is 4B aligned */
+#if (IPA_MEM_v2_5_RAM_UC_INFO_OFST & 3)
+#error uC info is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_OFST_START (IPA_MEM_v2_5_RAM_UC_INFO_OFST + \
+ IPA_MEM_v2_5_RAM_UC_INFO_SIZE)
+
+#define IPA_MEM_v2_5_RAM_V4_FLT_OFST (IPA_MEM_v2_5_RAM_OFST_START + \
+ 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_V4_FLT_SIZE 88
+
+/* V4 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_V4_FLT_OFST & 7)
+#error V4 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_V6_FLT_OFST (IPA_MEM_v2_5_RAM_V4_FLT_OFST + \
+ IPA_MEM_v2_5_RAM_V4_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_V6_FLT_SIZE 88
+
+/* V6 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_V6_FLT_OFST & 7)
+#error V6 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_V4_RT_OFST (IPA_MEM_v2_5_RAM_V6_FLT_OFST + \
+ IPA_MEM_v2_5_RAM_V6_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_V4_NUM_INDEX 15
+#define IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI 6
+#define IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO \
+ (IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI + 1)
+#define IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI \
+ (IPA_MEM_v2_5_RAM_V4_NUM_INDEX - 1)
+#define IPA_MEM_v2_5_RAM_V4_RT_SIZE (IPA_MEM_v2_5_RAM_V4_NUM_INDEX * 4)
+
+/* V4 routing header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_V4_RT_OFST & 7)
+#error V4 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_V6_RT_OFST (IPA_MEM_v2_5_RAM_V4_RT_OFST + \
+ IPA_MEM_v2_5_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_V6_NUM_INDEX 15
+#define IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI 6
+#define IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO \
+ (IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI + 1)
+#define IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI \
+ (IPA_MEM_v2_5_RAM_V6_NUM_INDEX - 1)
+#define IPA_MEM_v2_5_RAM_V6_RT_SIZE (IPA_MEM_v2_5_RAM_V6_NUM_INDEX * 4)
+
+/* V6 routing header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_V6_RT_OFST & 7)
+#error V6 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_MODEM_HDR_OFST (IPA_MEM_v2_5_RAM_V6_RT_OFST + \
+ IPA_MEM_v2_5_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE 320
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_MODEM_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_APPS_HDR_OFST (IPA_MEM_v2_5_RAM_MODEM_HDR_OFST + \
+ IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_HDR_SIZE 0
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_APPS_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST \
+ (IPA_MEM_v2_5_RAM_APPS_HDR_OFST + IPA_MEM_v2_5_RAM_APPS_HDR_SIZE + \
+ 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE 512
+
+/* header processing context table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST & 7)
+#error header processing context table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST \
+ (IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST + \
+ IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE 512
+
+/* header processing context table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST & 7)
+#error header processing context table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_MODEM_OFST (IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST + \
+ IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_MODEM_SIZE 5800
+
+/* modem memory is 4B aligned */
+#if (IPA_MEM_v2_5_RAM_MODEM_OFST & 3)
+#error modem memory is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_5_RAM_MODEM_OFST + \
+ IPA_MEM_v2_5_RAM_MODEM_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE 0
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST (IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST + \
+ IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE 0
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_END_OFST (IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST + \
+ IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST IPA_MEM_v2_5_RAM_END_OFST
+#define IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE 0
+#define IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST IPA_MEM_v2_5_RAM_END_OFST
+#define IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE 0
+#define IPA_MEM_v2_5_RAM_HDR_SIZE_DDR 2048
+
+/*
+ * IPA v2.6Lite SRAM memory layout:
+ * +----------------+
+ * | UC INFO |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | V4 FLT HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | V6 FLT HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | V4 RT HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | V6 RT HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | MODEM HDR |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | COMP / DECOMP |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ * | MODEM MEM |
+ * +----------------+
+ * | CANARY |
+ * +----------------+
+ */
+
+#define IPA_MEM_v2_6L_RAM_UC_MEM_SIZE 128
+#define IPA_MEM_v2_6L_RAM_UC_INFO_OFST IPA_MEM_v2_6L_RAM_UC_MEM_SIZE
+#define IPA_MEM_v2_6L_RAM_UC_INFO_SIZE 512
+
+/* uC info is 4B aligned */
+#if (IPA_MEM_v2_6L_RAM_UC_INFO_OFST & 3)
+#error uC info is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_OFST_START (IPA_MEM_v2_6L_RAM_UC_INFO_OFST + \
+ IPA_MEM_v2_6L_RAM_UC_INFO_SIZE)
+
+#define IPA_MEM_v2_6L_RAM_V4_FLT_OFST (IPA_MEM_v2_6L_RAM_OFST_START + \
+ 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_V4_FLT_SIZE 88
+
+/* V4 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_V4_FLT_OFST & 7)
+#error V4 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_V6_FLT_OFST (IPA_MEM_v2_6L_RAM_V4_FLT_OFST + \
+ IPA_MEM_v2_6L_RAM_V4_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_V6_FLT_SIZE 88
+
+/* V6 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_V6_FLT_OFST & 7)
+#error V6 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_V4_RT_OFST (IPA_MEM_v2_6L_RAM_V6_FLT_OFST + \
+ IPA_MEM_v2_6L_RAM_V6_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_V4_NUM_INDEX 15
+#define IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI 6
+#define IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO \
+ (IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI + 1)
+#define IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI \
+ (IPA_MEM_v2_6L_RAM_V4_NUM_INDEX - 1)
+#define IPA_MEM_v2_6L_RAM_V4_RT_SIZE (IPA_MEM_v2_6L_RAM_V4_NUM_INDEX * 4)
+
+/* V4 routing header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_V4_RT_OFST & 7)
+#error V4 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_V6_RT_OFST (IPA_MEM_v2_6L_RAM_V4_RT_OFST + \
+ IPA_MEM_v2_6L_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_V6_NUM_INDEX 15
+#define IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI 6
+#define IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO \
+ (IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI + 1)
+#define IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI \
+ (IPA_MEM_v2_6L_RAM_V6_NUM_INDEX - 1)
+#define IPA_MEM_v2_6L_RAM_V6_RT_SIZE (IPA_MEM_v2_6L_RAM_V6_NUM_INDEX * 4)
+
+/* V6 routing header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_V6_RT_OFST & 7)
+#error V6 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST (IPA_MEM_v2_6L_RAM_V6_RT_OFST + \
+ IPA_MEM_v2_6L_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE 320
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_APPS_HDR_OFST (IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST + \
+ IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE)
+#define IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE 0
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_APPS_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST \
+ (IPA_MEM_v2_6L_RAM_APPS_HDR_OFST + IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE + \
+ 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE 512
+
+/* comp/decomp memory region is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST & 7)
+#error comp/decomp memory region is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_MODEM_OFST \
+ (IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST + \
+ IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_MODEM_SIZE 6376
+
+/* modem memory is 4B aligned */
+#if (IPA_MEM_v2_6L_RAM_MODEM_OFST & 3)
+#error modem memory is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_6L_RAM_MODEM_OFST + \
+ IPA_MEM_v2_6L_RAM_MODEM_SIZE)
+#define IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE 0
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST \
+ (IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST + \
+ IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE)
+#define IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE 0
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_END_OFST (IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST + \
+ IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE)
+
+#define IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST IPA_MEM_v2_6L_RAM_END_OFST
+#define IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE 0
+#define IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST IPA_MEM_v2_6L_RAM_END_OFST
+#define IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE 0
+#define IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR 2048
+
+#endif /* _IPA_RAM_MMAP_H_ */
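
The END_OFST values exist so a probe-time check can confirm the static map
fits the SRAM actually reported by the hardware. A hedged sketch of such a
check (the helper name and byte-sized argument are assumptions; the real
driver may compare in different units):

static int example_check_sram_fit(u32 available_sram_bytes)
{
	/* reject parts whose SRAM is smaller than the static map */
	if (IPA_MEM_v2_RAM_END_OFST > available_sram_bytes)
		return -ENOMEM;
	return 0;
}
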
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h b/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h
new file mode 100644
index 0000000..6487a2f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h
@@ -0,0 +1,319 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __IPA_REG_H__
+#define __IPA_REG_H__
+
+/*
+ * IPA's BAM-specific registers,
+ * used for IPA HW 1.0 only
+ */
+
+#define IPA_BAM_REG_BASE_OFST 0x00004000
+#define IPA_BAM_CNFG_BITS_OFST 0x7c
+#define IPA_BAM_REMAP_SIZE (0x1000)
+
+#define IPA_FILTER_FILTER_EN_BMSK 0x1
+#define IPA_FILTER_FILTER_EN_SHFT 0x0
+#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094
+#define IPA_AGGREGATION_QCNCM_SIG0_SHFT 16
+#define IPA_AGGREGATION_QCNCM_SIG1_SHFT 8
+
+#define IPA_AGGREGATION_SPARE_REG_1_OFST 0x00002090
+
+#define IPA_AGGREGATION_SINGLE_NDP_MSK 0x1
+#define IPA_AGGREGATION_SINGLE_NDP_BMSK 0xfffffffe
+
+#define IPA_AGGREGATION_MODE_MSK 0x1
+#define IPA_AGGREGATION_MODE_SHFT 31
+#define IPA_AGGREGATION_MODE_BMSK 0x7fffffff
+
+#define IPA_AGGREGATION_QCNCM_SIG_BMSK 0xff000000
+
+#define IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT 2
+#define IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK 0x4
+
+#define IPA_HEAD_OF_LINE_BLOCK_EN_OFST 0x00000044
+
+/*
+ * End of IPA 1.0 Registers
+ */
+
+
+/*
+ * IPA HW 2.0 Registers
+ */
+#define IPA_REG_BASE 0x0
+
+#define IPA_IRQ_STTS_EE_n_ADDR(n) (IPA_REG_BASE + 0x00001008 + 0x1000 * (n))
+#define IPA_IRQ_STTS_EE_n_MAXn 3
+
+#define IPA_IRQ_EN_EE_n_ADDR(n) (IPA_REG_BASE + 0x0000100c + 0x1000 * (n))
+#define IPA_IRQ_EN_EE_n_MAXn 3
+
+
+#define IPA_IRQ_CLR_EE_n_ADDR(n) (IPA_REG_BASE + 0x00001010 + 0x1000 * (n))
+#define IPA_IRQ_CLR_EE_n_MAXn 3
+
+#define IPA_IRQ_SUSPEND_INFO_EE_n_ADDR(n) \
+ (IPA_REG_BASE + 0x00001098 + 0x1000 * (n))
+#define IPA_IRQ_SUSPEND_INFO_EE_n_MAXn 3
+/*
+ * End of IPA 2.0 Registers
+ */
+
+/*
+ * IPA HW 2.5 Registers
+ */
+#define IPA_BCR_OFST 0x000005B0
+#define IPA_COUNTER_CFG_OFST 0x000005E8
+#define IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK 0xF
+#define IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT 0x0
+#define IPA_COUNTER_CFG_AGGR_GRAN_BMSK 0x1F0
+#define IPA_COUNTER_CFG_AGGR_GRAN_SHFT 0x4
+/*
+ * End of IPA 2.5 Registers
+ */
+
+/*
+ * IPA HW 2.6/2.6L Registers
+ */
+#define IPA_ENABLED_PIPES_OFST 0x000005DC
+#define IPA_YELLOW_MARKER_SYS_CFG_OFST 0x00000728
+/*
+ * End of IPA 2.6/2.6L Registers
+ */
+
+/*
+ * Common Registers
+ */
+#define IPA_REG_BASE_OFST_v2_0 0x00020000
+#define IPA_REG_BASE_OFST_v2_5 0x00040000
+#define IPA_REG_BASE_OFST_v2_6L IPA_REG_BASE_OFST_v2_5
+#define IPA_COMP_SW_RESET_OFST 0x0000003c
+
+#define IPA_VERSION_OFST 0x00000034
+#define IPA_COMP_HW_VERSION_OFST 0x00000030
+
+#define IPA_SHARED_MEM_SIZE_OFST_v1_1 0x00000050
+#define IPA_SHARED_MEM_SIZE_OFST_v2_0 0x00000050
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0 0xffff0000
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0 0x10
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0 0xffff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0 0x0
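The *_BMSK/*_SHFT pairs above follow the usual read-mask-shift convention. A
minimal sketch of decoding the v2.0 shared-memory size field, assuming an
ioremap'ed register base (the helper name is hypothetical, not driver code):

	static u16 read_shared_mem_size(void __iomem *ipa_base)
	{
		u32 val = ioread32(ipa_base + IPA_SHARED_MEM_SIZE_OFST_v2_0);

		/* isolate the field, then shift it down to bit 0 */
		return (val & IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0) >>
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0;
	}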
+
+#define IPA_ENDP_INIT_AGGR_N_OFST_v1_1(n) (0x000001c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_AGGR_N_OFST_v2_0(n) (0x00000320 + 0x4 * (n))
+
+#define IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(n) (0x00000220 + 0x4 * (n))
+#define IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(n) (0x00000370 + 0x4 * (n))
+#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT 0x0
+
+#define IPA_ROUTE_OFST_v1_1 0x00000044
+
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+
+#define IPA_FILTER_OFST_v1_1 0x00000048
+
+#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v1_1(n) (0x00004000 + 0x4 * (n))
+#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(n) (0x00005000 + 0x4 * (n))
+#define IPA_SRAM_DIRECT_ACCESS_N_OFST(n) (0x00004000 + 0x4 * (n))
+#define IPA_SRAM_SW_FIRST_v2_5 0x00005000
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0
+#define IPA_COMP_CFG_OFST 0x00000038
+
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x1
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf
+#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_MODE_N_OFST_v1_1(n) (0x00000170 + 0x4 * (n))
+#define IPA_ENDP_INIT_MODE_N_OFST_v2_0(n) (0x000002c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_MODE_N_RMSK 0x7f
+#define IPA_ENDP_INIT_MODE_N_MAX 19
+#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1 0x7c
+#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1 0x2
+#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0 0x1f0
+#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0 0x4
+#define IPA_ENDP_INIT_MODE_N_MODE_BMSK 0x7
+#define IPA_ENDP_INIT_MODE_N_MODE_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_N_OFST_v1_1(n) (0x00000120 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_N_OFST_v2_0(n) (0x00000170 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
+#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
+#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
+#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
+#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK 0x1f80
+
+#define IPA_ENDP_INIT_NAT_N_OFST_v1_1(n) (0x000000c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_NAT_N_OFST_v2_0(n) (0x00000120 + 0x4 * (n))
+#define IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0
+
+
+#define IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(n) (0x000001c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0 0x1c00
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5 0x3c00
+
+
+
+/*
+ * IPA HW 1.1 specific Registers
+ */
+
+#define IPA_FILTER_FILTER_DIS_BMSK 0x1
+#define IPA_FILTER_FILTER_DIS_SHFT 0x0
+#define IPA_SINGLE_NDP_MODE_OFST 0x00000064
+#define IPA_QCNCM_OFST 0x00000060
+
+#define IPA_ENDP_INIT_CTRL_N_OFST(n) (0x00000070 + 0x4 * (n))
+#define IPA_ENDP_INIT_CTRL_N_RMSK 0x1
+#define IPA_ENDP_INIT_CTRL_N_MAX 19
+#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK 0x1
+#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT 0x0
+#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK 0x2
+#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT 0x1
+
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(n) (0x00000270 + 0x4 * (n))
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(n) (0x000003c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_RMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(n) (0x00000470 + 0x04 * (n))
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(n) (0x000002c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(n) (0x00000420 + 0x4 * (n))
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_RMSK 0x1ff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK 0x1ff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT 0x0
+
+#define IPA_DEBUG_CNT_REG_N_OFST_v1_1(n) (0x00000340 + 0x4 * (n))
+#define IPA_DEBUG_CNT_REG_N_OFST_v2_0(n) (0x00000600 + 0x4 * (n))
+#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_MAX 15
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
+
+#define IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(n) (0x00000380 + 0x4 * (n))
+#define IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(n) (0x00000640 + 0x4 * (n))
+#define IPA_DEBUG_CNT_CTRL_N_RMSK 0x1ff1f171
+#define IPA_DEBUG_CNT_CTRL_N_MAX 15
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_BMSK 0x1ff00000
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_SHFT 0x14
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_SHFT 0xc
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_BMSK 0x100
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_SHFT 0x8
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_BMSK 0x70
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_SHFT 0x4
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_BMSK 0x1
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_SHFT 0x0
+
+#define IPA_ENDP_STATUS_n_OFST(n) (0x000004c0 + 0x4 * (n))
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_CFG_n_OFST(n) (0x000000c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_CFG_n_RMSK 0x7f
+#define IPA_ENDP_INIT_CFG_n_MAXn 19
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(n) (0x00000220 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_MAXn 19
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_METADATA_n_OFST(n) (0x00000270 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
+
+#define IPA_IRQ_EE_UC_n_OFFS(n) (0x0000101c + 0x1000 * (n))
+#define IPA_IRQ_EE_UC_n_RMSK 0x1
+#define IPA_IRQ_EE_UC_n_MAXn 3
+#define IPA_IRQ_EE_UC_n_INT_BMSK 0x1
+#define IPA_IRQ_EE_UC_n_INT_SHFT 0x0
+
+#define IPA_UC_MAILBOX_m_n_OFFS(m, n) (0x0001a000 + 0x80 * (m) + 0x4 * (n))
+#define IPA_UC_MAILBOX_m_n_OFFS_v2_5(m, n) (0x00022000 + 0x80 * (m) + 0x4 * (n))
+
+#define IPA_SYS_PKT_PROC_CNTXT_BASE_OFST (0x000005d8)
+#define IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST (0x000005e0)
+
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
new file mode 100644
index 0000000..164e94b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -0,0 +1,1457 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include "ipa_i.h"
+
+#define IPA_RT_TABLE_INDEX_NOT_FOUND (-1)
+#define IPA_RT_TABLE_WORD_SIZE (4)
+#define IPA_RT_INDEX_BITMAP_SIZE (32)
+#define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127)
+#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3)
+#define IPA_RT_BIT_MASK (0x1)
+#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED (-1)
+#define IPA_RT_STATUS_OF_MDFY_FAILED (-1)
+
+/**
+ * __ipa_generate_rt_hw_rule_v2() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know
+ *  the size of the rule as seen by HW, so no valid buffer was passed
+ *  and a scratch buffer is used instead.
+ *  With this scheme the rule is generated twice: once to learn the size
+ *  using the scratch buffer, and a second time to write the rule into
+ *  the caller-supplied buffer of the required size.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf)
+{
+ struct ipa_rt_rule_hw_hdr *rule_hdr;
+ const struct ipa_rt_rule *rule =
+ (const struct ipa_rt_rule *)&entry->rule;
+ u16 en_rule = 0;
+ u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
+ u8 *start;
+ int pipe_idx;
+
+ if (buf == NULL) {
+ memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+ buf = (u8 *)tmp;
+ }
+
+ start = buf;
+ rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
+ pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
+ if (pipe_idx == -1) {
+ IPAERR("Wrong destination pipe specified in RT rule\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+ if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
+ IPAERR("No RT rule on IPA_client_producer pipe.\n");
+ IPAERR("pipe_idx: %d dst_pipe: %d\n",
+ pipe_idx, entry->rule.dst);
+ WARN_ON(1);
+ return -EPERM;
+ }
+ rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
+ rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
+ if (entry->hdr) {
+ rule_hdr->u.hdr.hdr_offset =
+ entry->hdr->offset_entry->offset >> 2;
+ } else {
+ rule_hdr->u.hdr.hdr_offset = 0;
+ }
+ buf += sizeof(struct ipa_rt_rule_hw_hdr);
+
+ if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+ IPAERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+
+ IPADBG("en_rule 0x%x\n", en_rule);
+
+ rule_hdr->u.hdr.en_rule = en_rule;
+ ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
+
+ if (entry->hw_len == 0) {
+ entry->hw_len = buf - start;
+ } else if (entry->hw_len != (buf - start)) {
+ IPAERR(
+		"hw_len differs b/w passes passed=0x%x calc=0x%x\n",
+ entry->hw_len,
+ (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
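The NULL-buffer convention documented above yields a two-pass call pattern:
size first, then write. A sketch of how callers in this file drive it (see
ipa_get_rt_hw_tbl_size() and ipa_generate_rt_hw_tbl_common() below):

	/* pass 1: buf == NULL, only entry->hw_len is filled in */
	if (ipa_ctx->ctrl->ipa_generate_rt_hw_rule(ip, entry, NULL))
		return -EPERM;

	/* pass 2: write the rule into a buffer of at least hw_len bytes */
	if (ipa_ctx->ctrl->ipa_generate_rt_hw_rule(ip, entry, body))
		return -EPERM;
	body += entry->hw_len;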
+
+/**
+ * __ipa_generate_rt_hw_rule_v2_5() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know
+ *  the size of the rule as seen by HW, so no valid buffer was passed
+ *  and a scratch buffer is used instead.
+ *  With this scheme the rule is generated twice: once to learn the size
+ *  using the scratch buffer, and a second time to write the rule into
+ *  the caller-supplied buffer of the required size.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf)
+{
+ struct ipa_rt_rule_hw_hdr *rule_hdr;
+ const struct ipa_rt_rule *rule =
+ (const struct ipa_rt_rule *)&entry->rule;
+ u16 en_rule = 0;
+ u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
+ u8 *start;
+ int pipe_idx;
+
+ if (buf == NULL) {
+ memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+ buf = (u8 *)tmp;
+ }
+
+ start = buf;
+ rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
+ pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
+ if (pipe_idx == -1) {
+ IPAERR("Wrong destination pipe specified in RT rule\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+ if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
+ IPAERR("No RT rule on IPA_client_producer pipe.\n");
+ IPAERR("pipe_idx: %d dst_pipe: %d\n",
+ pipe_idx, entry->rule.dst);
+ WARN_ON(1);
+ return -EPERM;
+ }
+ rule_hdr->u.hdr_v2_5.pipe_dest_idx = pipe_idx;
+ if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
+ struct ipa_hdr_proc_ctx_entry *proc_ctx;
+
+ proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
+ rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_proc_ctx_tbl_lcl;
+ BUG_ON(proc_ctx->offset_entry->offset & 31);
+ rule_hdr->u.hdr_v2_5.proc_ctx = 1;
+ rule_hdr->u.hdr_v2_5.hdr_offset =
+ (proc_ctx->offset_entry->offset +
+ ipa_ctx->hdr_proc_ctx_tbl.start_offset) >> 5;
+ } else if (entry->hdr) {
+ rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_tbl_lcl;
+ BUG_ON(entry->hdr->offset_entry->offset & 3);
+ rule_hdr->u.hdr_v2_5.proc_ctx = 0;
+ rule_hdr->u.hdr_v2_5.hdr_offset =
+ entry->hdr->offset_entry->offset >> 2;
+ } else {
+ rule_hdr->u.hdr_v2_5.proc_ctx = 0;
+ rule_hdr->u.hdr_v2_5.hdr_offset = 0;
+ }
+ buf += sizeof(struct ipa_rt_rule_hw_hdr);
+
+ if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+ IPAERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+
+ IPADBG("en_rule 0x%x\n", en_rule);
+
+ rule_hdr->u.hdr_v2_5.en_rule = en_rule;
+ ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
+
+ if (entry->hw_len == 0) {
+ entry->hw_len = buf - start;
+ } else if (entry->hw_len != (buf - start)) {
+		IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%x\n",
+ entry->hw_len, (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/**
+ * __ipa_generate_rt_hw_rule_v2_6L() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer, buf == NULL means that the caller wants to know the size
+ * of the rule as seen by HW so they did not pass a valid buffer, we will
+ * use a scratch buffer instead.
+ * With this scheme we are going to generate the rule twice, once to know
+ * size using scratch buffer and second to write the rule to the actual
+ * caller supplied buffer which is of required size.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf)
+{
+ /* Same implementation as IPAv2 */
+ return __ipa_generate_rt_hw_rule_v2(ip, entry, buf);
+}
+
+/**
+ * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
+ * @ip: the ip address family type
+ * @hdr_sz: header size
+ * @max_rt_idx: maximal index
+ *
+ * Returns: size of the HW routing table in bytes on success, negative on
+ * failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ * the highest bit set in rt_idx_bitmap determines the size of the hdr of
+ * the routing tbl
+ */
+static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
+ int *max_rt_idx)
+{
+ struct ipa_rt_tbl_set *set;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ u32 total_sz = 0;
+ u32 tbl_sz;
+ u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
+ int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
+ int i;
+ int res;
+
+ *hdr_sz = 0;
+ set = &ipa_ctx->rt_tbl_set[ip];
+
+ for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+ if (bitmap & IPA_RT_BIT_MASK)
+ highest_bit_set = i;
+ bitmap >>= 1;
+ }
+
+ *max_rt_idx = highest_bit_set;
+ if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
+ IPAERR("no rt tbls present\n");
+ total_sz = IPA_RT_TABLE_WORD_SIZE;
+ *hdr_sz = IPA_RT_TABLE_WORD_SIZE;
+ return total_sz;
+ }
+
+ *hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
+ total_sz += *hdr_sz;
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ tbl_sz = 0;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
+ ip,
+ entry,
+ NULL);
+ if (res) {
+ IPAERR("failed to find HW RT rule size\n");
+ return -EPERM;
+ }
+ tbl_sz += entry->hw_len;
+ }
+
+ if (tbl_sz)
+ tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;
+
+ if (tbl->in_sys)
+ continue;
+
+ if (tbl_sz) {
+ /* add the terminator */
+ total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
+ /* every rule-set should start at word boundary */
+ total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+
+ IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+ return total_sz;
+}
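The add-and-mask rounding above works because the *_ALLIGNMENT constants are
of the form 2^n - 1. A one-line sketch with a worked value (the macro is
illustrative, not part of the driver):

	/* round x up to the next (mask + 1) boundary, mask being 2^n - 1 */
	#define RT_ALIGN_UP(x, mask)	(((x) + (mask)) & ~(mask))

	/* e.g. RT_ALIGN_UP(10, IPA_RT_ENTRY_MEMORY_ALLIGNMENT) == 12 */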
+
+static int ipa_generate_rt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, u8 *hdr,
+ u32 body_ofst, u32 apps_start_idx)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_rt_tbl_set *set;
+ u32 offset;
+ u8 *body;
+ struct ipa_mem_buffer rt_tbl_mem;
+ u8 *rt_tbl_mem_body;
+ int res;
+
+ /* build the rt tbl in the DMA buffer to submit to IPA HW */
+ body = base;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ if (!tbl->in_sys) {
+ offset = body - base + body_ofst;
+ if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("offset is not word multiple %d\n",
+ offset);
+ goto proc_err;
+ }
+
+ /* convert offset to words from bytes */
+ offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_RT_BIT_MASK;
+
+ /* update the hdr at the right index */
+ ipa_write_32(offset, hdr +
+ ((tbl->idx - apps_start_idx) *
+ IPA_RT_TABLE_WORD_SIZE));
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
+ ip,
+ entry,
+ body);
+ if (res) {
+ IPAERR("failed to gen HW RT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((long)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_RT_TABLE_WORD_SIZE -
+ ((long)body &
+ IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the RT tbl */
+ rt_tbl_mem.size = tbl->sz;
+ rt_tbl_mem.base =
+ dma_alloc_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
+ &rt_tbl_mem.phys_base, GFP_KERNEL);
+ if (!rt_tbl_mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n",
+ rt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(rt_tbl_mem.phys_base &
+ IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
+ rt_tbl_mem_body = rt_tbl_mem.base;
+ memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
+ /* update the hdr at the right index */
+ ipa_write_32(rt_tbl_mem.phys_base,
+ hdr + ((tbl->idx - apps_start_idx) *
+ IPA_RT_TABLE_WORD_SIZE));
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
+ ip,
+ entry,
+ rt_tbl_mem_body);
+ if (res) {
+ IPAERR("failed to gen HW RT rule\n");
+ WARN_ON(1);
+ goto rt_table_mem_alloc_failed;
+ }
+ rt_tbl_mem_body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);
+
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = rt_tbl_mem;
+ }
+ }
+
+ return 0;
+
+rt_table_mem_alloc_failed:
+ dma_free_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
+ rt_tbl_mem.base, rt_tbl_mem.phys_base);
+proc_err:
+ return -EPERM;
+}
+
+
+/**
+ * ipa_generate_rt_hw_tbl_v1_1() - generates the routing hardware table
+ * @ip: [in] the ip address family type
+ * @mem: [out] buffer to put the routing table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa_generate_rt_hw_tbl_v1_1(enum ipa_ip_type ip,
+ struct ipa_mem_buffer *mem)
+{
+ u32 hdr_sz;
+ u8 *hdr;
+ u8 *body;
+ u8 *base;
+ int max_rt_idx;
+ int i;
+
+ mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+ mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+ ~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
+
+ if (mem->size == 0) {
+ IPAERR("rt tbl empty ip=%d\n", ip);
+ goto error;
+ }
+ mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ goto error;
+ }
+
+ memset(mem->base, 0, mem->size);
+
+ /* build the rt tbl in the DMA buffer to submit to IPA HW */
+ base = hdr = (u8 *)mem->base;
+ body = base + hdr_sz;
+
+ /* setup all indices to point to the empty sys rt tbl */
+ for (i = 0; i <= max_rt_idx; i++)
+ ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
+ hdr + (i * IPA_RT_TABLE_WORD_SIZE));
+
+ if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, 0, 0)) {
+ IPAERR("fail to generate RT tbl\n");
+ goto proc_err;
+ }
+
+ return 0;
+
+proc_err:
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
+ mem->base = NULL;
+error:
+ return -EPERM;
+}
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_tbl *next;
+ struct ipa_rt_tbl_set *set;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
+ dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
+ tbl->prev_mem.base,
+ tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+ }
+
+ set = &ipa_ctx->reap_rt_tbl_set[ip];
+ list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+ list_del(&tbl->link);
+ WARN_ON(tbl->prev_mem.phys_base != 0);
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
+ ip);
+ dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+ }
+ }
+}
+
+int __ipa_commit_rt_v1_1(enum ipa_ip_type ip)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ void *cmd;
+ struct ipa_ip_v4_routing_init *v4;
+ struct ipa_ip_v6_routing_init *v6;
+ u16 avail;
+ u16 size;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+ if (ip == IPA_IP_v4) {
+ avail = ipa_ctx->ip4_rt_tbl_lcl ? IPA_MEM_v1_RAM_V4_RT_SIZE :
+ IPA_MEM_PART(v4_rt_size_ddr);
+ size = sizeof(struct ipa_ip_v4_routing_init);
+ } else {
+ avail = ipa_ctx->ip6_rt_tbl_lcl ? IPA_MEM_v1_RAM_V6_RT_SIZE :
+ IPA_MEM_PART(v6_rt_size_ddr);
+ size = sizeof(struct ipa_ip_v6_routing_init);
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_rt_hw_tbl_v1_1(ip, mem)) {
+ IPAERR("fail to generate RT HW TBL ip %d\n", ip);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (mem->size > avail) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+ goto fail_send_cmd;
+ }
+
+ if (ip == IPA_IP_v4) {
+ v4 = (struct ipa_ip_v4_routing_init *)cmd;
+ desc.opcode = IPA_IP_V4_ROUTING_INIT;
+ v4->ipv4_rules_addr = mem->phys_base;
+ v4->size_ipv4_rules = mem->size;
+ v4->ipv4_addr = IPA_MEM_v1_RAM_V4_RT_OFST;
+		IPADBG("putting Routing IPv4 rules to phys 0x%x\n",
+ v4->ipv4_addr);
+ } else {
+ v6 = (struct ipa_ip_v6_routing_init *)cmd;
+ desc.opcode = IPA_IP_V6_ROUTING_INIT;
+ v6->ipv6_rules_addr = mem->phys_base;
+ v6->size_ipv6_rules = mem->size;
+ v6->ipv6_addr = IPA_MEM_v1_RAM_V6_RT_OFST;
+		IPADBG("putting Routing IPv6 rules to phys 0x%x\n",
+ v6->ipv6_addr);
+ }
+
+ desc.pyld = cmd;
+ desc.len = size;
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ __ipa_reap_sys_rt_tbls(ip);
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->base)
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+ mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+ return -EPERM;
+}
+
+static int ipa_generate_rt_hw_tbl_v2(enum ipa_ip_type ip,
+ struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head)
+{
+ u32 hdr_sz;
+ u8 *hdr;
+ u8 *body;
+ u8 *base;
+ int max_rt_idx;
+ int i;
+ u32 *entr;
+ int num_index;
+ u32 body_start_offset;
+ u32 apps_start_idx;
+
+ if (ip == IPA_IP_v4) {
+ num_index = IPA_MEM_PART(v4_apps_rt_index_hi) -
+ IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
+ body_start_offset = IPA_MEM_PART(apps_v4_rt_ofst) -
+ IPA_MEM_PART(v4_rt_ofst);
+ apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
+ } else {
+ num_index = IPA_MEM_PART(v6_apps_rt_index_hi) -
+ IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
+ body_start_offset = IPA_MEM_PART(apps_v6_rt_ofst) -
+ IPA_MEM_PART(v6_rt_ofst);
+ apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
+ }
+
+ head->size = num_index * 4;
+ head->base = dma_alloc_coherent(ipa_ctx->pdev, head->size,
+ &head->phys_base, GFP_KERNEL);
+ if (!head->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", head->size);
+ goto err;
+ }
+ entr = (u32 *)head->base;
+ hdr = (u8 *)head->base;
+ for (i = 1; i <= num_index; i++) {
+ *entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
+ entr++;
+ }
+
+ mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+ mem->size -= hdr_sz;
+ mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+ ~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
+
+ if (mem->size > 0) {
+ mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n",
+ mem->size);
+ goto base_err;
+ }
+ memset(mem->base, 0, mem->size);
+ }
+
+ /* build the rt tbl in the DMA buffer to submit to IPA HW */
+ body = base = (u8 *)mem->base;
+
+ if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, body_start_offset,
+ apps_start_idx)) {
+ IPAERR("fail to generate RT tbl\n");
+ goto proc_err;
+ }
+
+ return 0;
+
+proc_err:
+ if (mem->size)
+ dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+ mem->phys_base);
+base_err:
+ dma_free_coherent(ipa_ctx->pdev, head->size, head->base,
+ head->phys_base);
+err:
+ return -EPERM;
+}
+
+int __ipa_commit_rt_v2(enum ipa_ip_type ip)
+{
+ struct ipa_desc desc[2];
+ struct ipa_mem_buffer body;
+ struct ipa_mem_buffer head;
+ struct ipa_hw_imm_cmd_dma_shared_mem cmd1 = {0};
+ struct ipa_hw_imm_cmd_dma_shared_mem cmd2 = {0};
+ u16 avail;
+ u32 num_modem_rt_index;
+ int rc = 0;
+ u32 local_addr1;
+ u32 local_addr2;
+ bool lcl;
+
+ memset(desc, 0, 2 * sizeof(struct ipa_desc));
+
+ if (ip == IPA_IP_v4) {
+ avail = ipa_ctx->ip4_rt_tbl_lcl ?
+ IPA_MEM_PART(apps_v4_rt_size) :
+ IPA_MEM_PART(v4_rt_size_ddr);
+ num_modem_rt_index =
+ IPA_MEM_PART(v4_modem_rt_index_hi) -
+ IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
+ local_addr1 = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_rt_ofst) +
+ num_modem_rt_index * 4;
+ local_addr2 = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v4_rt_ofst);
+ lcl = ipa_ctx->ip4_rt_tbl_lcl;
+ } else {
+ avail = ipa_ctx->ip6_rt_tbl_lcl ?
+ IPA_MEM_PART(apps_v6_rt_size) :
+ IPA_MEM_PART(v6_rt_size_ddr);
+ num_modem_rt_index =
+ IPA_MEM_PART(v6_modem_rt_index_hi) -
+ IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
+ local_addr1 = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_rt_ofst) +
+ num_modem_rt_index * 4;
+ local_addr2 = ipa_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v6_rt_ofst);
+ lcl = ipa_ctx->ip6_rt_tbl_lcl;
+ }
+
+ if (ipa_generate_rt_hw_tbl_v2(ip, &body, &head)) {
+ IPAERR("fail to generate RT HW TBL ip %d\n", ip);
+ rc = -EFAULT;
+ goto fail_gen;
+ }
+
+ if (body.size > avail) {
+ IPAERR("tbl too big, needed %d avail %d\n", body.size, avail);
+ rc = -EFAULT;
+ goto fail_send_cmd;
+ }
+
+ cmd1.size = head.size;
+ cmd1.system_addr = head.phys_base;
+ cmd1.local_addr = local_addr1;
+ desc[0].opcode = IPA_DMA_SHARED_MEM;
+ desc[0].pyld = &cmd1;
+ desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ desc[0].type = IPA_IMM_CMD_DESC;
+
+ if (lcl) {
+ cmd2.size = body.size;
+ cmd2.system_addr = body.phys_base;
+ cmd2.local_addr = local_addr2;
+
+ desc[1].opcode = IPA_DMA_SHARED_MEM;
+ desc[1].pyld = &cmd2;
+ desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+ desc[1].type = IPA_IMM_CMD_DESC;
+
+ if (ipa_send_cmd(2, desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ goto fail_send_cmd;
+ }
+ } else {
+ if (ipa_send_cmd(1, desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ goto fail_send_cmd;
+ }
+ }
+
+ IPADBG("HEAD\n");
+ IPA_DUMP_BUFF(head.base, head.phys_base, head.size);
+ if (body.size) {
+ IPADBG("BODY\n");
+ IPA_DUMP_BUFF(body.base, body.phys_base, body.size);
+ }
+ __ipa_reap_sys_rt_tbls(ip);
+fail_send_cmd:
+ dma_free_coherent(ipa_ctx->pdev, head.size, head.base, head.phys_base);
+ if (body.size)
+ dma_free_coherent(ipa_ctx->pdev, body.size, body.base,
+ body.phys_base);
+fail_gen:
+ return rc;
+}
+
+/**
+ * __ipa_find_rt_tbl() - find the routing table
+ *  whose name is given as a parameter
+ * @ip: [in] the ip address family type of the wanted routing table
+ * @name: [in] the name of the wanted routing table
+ *
+ * Returns: the routing table whose name is given as a parameter, or NULL
+ * if it doesn't exist
+ */
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+ struct ipa_rt_tbl *entry;
+ struct ipa_rt_tbl_set *set;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+ if (!strcmp(name, entry->name))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * ipa2_query_rt_index() - find the index of the routing table
+ *  whose name and ip type are given as parameters
+ * @in: [inout] lookup parameters; on success in->idx is set to the index
+ *  of the wanted routing table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+ struct ipa_rt_tbl *entry;
+
+ if (in->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ /* check if this table exists */
+ entry = __ipa_find_rt_tbl(in->ip, in->name);
+ if (!entry)
+ return -EFAULT;
+
+ in->idx = entry->idx;
+ return 0;
+}
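A hedged usage sketch for the lookup above; the field names are taken from
the function itself, and the name-buffer size of IPA_RESOURCE_NAME_MAX is
assumed from its use elsewhere in this file:

	struct ipa_ioc_get_rt_tbl_indx in;

	in.ip = IPA_IP_v4;
	strlcpy(in.name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX);
	if (!ipa2_query_rt_index(&in))
		pr_info("rt tbl idx = %d\n", in.idx);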
+
+static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+ const char *name)
+{
+ struct ipa_rt_tbl *entry;
+ struct ipa_rt_tbl_set *set;
+ int i;
+ int id;
+
+ if (ip >= IPA_IP_MAX || name == NULL) {
+ IPAERR("bad parm\n");
+ goto error;
+ }
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ /* check if this table exists */
+ entry = __ipa_find_rt_tbl(ip, name);
+ if (!entry) {
+ entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc RT tbl object\n");
+ goto error;
+ }
+ /* find a routing tbl index */
+ for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+ if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
+ entry->idx = i;
+ set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
+ break;
+ }
+ }
+ if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+			IPAERR("no free RT tbl indices left\n");
+ goto fail_rt_idx_alloc;
+ }
+
+ INIT_LIST_HEAD(&entry->head_rt_rule_list);
+ INIT_LIST_HEAD(&entry->link);
+ strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+ entry->set = set;
+ entry->cookie = IPA_COOKIE;
+ entry->in_sys = (ip == IPA_IP_v4) ?
+ !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
+ set->tbl_cnt++;
+ list_add(&entry->link, &set->head_rt_tbl_list);
+
+ IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+ set->tbl_cnt, ip);
+
+ id = ipa_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+ entry->id = id;
+ }
+
+ return entry;
+
+fail_rt_idx_alloc:
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+error:
+ return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
+{
+ enum ipa_ip_type ip = IPA_IP_MAX;
+ u32 id;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad parms\n");
+ return -EINVAL;
+ }
+ id = entry->id;
+ if (ipa_id_find(id) == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ if (!entry->in_sys) {
+ list_del(&entry->link);
+ clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+ entry->set->tbl_cnt);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+ } else {
+ list_move(&entry->link,
+ &ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
+ clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+ entry->set->tbl_cnt);
+ }
+
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+ return 0;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+ const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_hdr_entry *hdr = NULL;
+ struct ipa_hdr_proc_ctx_entry *proc_ctx = NULL;
+ int id;
+
+ if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
+ IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
+ goto error;
+ }
+
+ if (rule->hdr_hdl) {
+ hdr = ipa_id_find(rule->hdr_hdl);
+ if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid hdr\n");
+ goto error;
+ }
+ } else if (rule->hdr_proc_ctx_hdl) {
+ proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl);
+ if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid proc ctx\n");
+ goto error;
+ }
+ }
+
+
+ tbl = __ipa_add_rt_tbl(ip, name);
+ if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ IPAERR("bad params\n");
+ goto error;
+ }
+ /*
+ * do not allow any rules to be added at end of the "default" routing
+ * tables
+ */
+ if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+ (tbl->rule_cnt > 0) && (at_rear != 0)) {
+ IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+ tbl->rule_cnt, at_rear);
+ goto error;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc RT rule object\n");
+ goto error;
+ }
+ INIT_LIST_HEAD(&entry->link);
+ entry->cookie = IPA_COOKIE;
+ entry->rule = *rule;
+ entry->tbl = tbl;
+ entry->hdr = hdr;
+ entry->proc_ctx = proc_ctx;
+ if (at_rear)
+ list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+ else
+ list_add(&entry->link, &tbl->head_rt_rule_list);
+ tbl->rule_cnt++;
+ if (entry->hdr)
+ entry->hdr->ref_cnt++;
+ else if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt++;
+ id = ipa_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ goto ipa_insert_failed;
+ }
+ IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
+ *rule_hdl = id;
+ entry->id = id;
+
+ return 0;
+
+ipa_insert_failed:
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ else if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt--;
+ list_del(&entry->link);
+ kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+error:
+ return -EPERM;
+}
+
+/**
+ * ipa2_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+ int i;
+ int ret;
+
+ if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].rt_rule_hdl)) {
+ IPAERR("failed to add rt rule %d\n", i);
+ rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (ipa_ctx->ctrl->ipa_commit_rt(rules->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
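A hedged caller sketch follows; the variable-length layout (a header followed
by num_rules entries carrying .rule/.at_rear/.rt_rule_hdl/.status) is inferred
from the loop above, and the entry type name struct ipa_rt_rule_add plus the
destination client value are assumptions for illustration:

	struct ipa_ioc_add_rt_rule *req;

	req = kzalloc(sizeof(*req) + sizeof(struct ipa_rt_rule_add),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->commit = 1;
	req->ip = IPA_IP_v4;
	strlcpy(req->rt_tbl_name, "example_tbl", IPA_RESOURCE_NAME_MAX);
	req->num_rules = 1;
	req->rules[0].rule.dst = IPA_CLIENT_USB_CONS;	/* must be a CONS pipe */
	req->rules[0].at_rear = 1;

	if (ipa2_add_rt_rule(req) || req->rules[0].status)
		/* handle failure */;
	kfree(req);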
+
+int __ipa_del_rt_rule(u32 rule_hdl)
+{
+ struct ipa_rt_entry *entry;
+ int id;
+
+ entry = ipa_id_find(rule_hdl);
+
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ return -EINVAL;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("bad params\n");
+ return -EINVAL;
+ }
+
+ if (entry->hdr)
+ __ipa_release_hdr(entry->hdr->id);
+ else if (entry->proc_ctx)
+ __ipa_release_hdr_proc_ctx(entry->proc_ctx->id);
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
+ entry->tbl->rule_cnt);
+ if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry->tbl))
+ IPAERR("fail to del RT tbl\n");
+ }
+ entry->cookie = 0;
+ id = entry->id;
+ kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+
+ return 0;
+}
+
+/**
+ * ipa2_del_rt_rule() - Remove the specified routing rules from SW and
+ * optionally commit to IPA HW
+ * commit to IPA HW
+ * @hdls: [inout] set of routing rules to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+ int i;
+ int ret;
+
+ if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del rt rule %i\n", i);
+ hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+
+/**
+ * ipa2_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_commit_rt(enum ipa_ip_type ip)
+{
+ int ret;
+
+ if (ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ /*
+ * issue a commit on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa2_commit_flt(ip))
+ return -EPERM;
+
+ mutex_lock(&ipa_ctx->lock);
+ if (ipa_ctx->ctrl->ipa_commit_rt(ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+
+/**
+ * ipa2_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_reset_rt(enum ipa_ip_type ip)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_tbl *tbl_next;
+ struct ipa_rt_tbl_set *set;
+ struct ipa_rt_entry *rule;
+ struct ipa_rt_entry *rule_next;
+ struct ipa_rt_tbl_set *rset;
+ u32 apps_start_idx;
+ int id;
+
+ if (ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
+ if (ip == IPA_IP_v4)
+ apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
+ else
+ apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
+ } else {
+ apps_start_idx = 0;
+ }
+
+ /*
+ * issue a reset on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa2_reset_flt(ip))
+ IPAERR("fail to reset flt ip=%d\n", ip);
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ rset = &ipa_ctx->reap_rt_tbl_set[ip];
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset rt ip=%d\n", ip);
+ list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+ list_for_each_entry_safe(rule, rule_next,
+ &tbl->head_rt_rule_list, link) {
+ if (ipa_id_find(rule->id) == NULL) {
+ WARN_ON(1);
+ mutex_unlock(&ipa_ctx->lock);
+ return -EFAULT;
+ }
+
+ /*
+ * for the "default" routing tbl, remove all but the
+ * last rule
+ */
+ if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
+ continue;
+
+ list_del(&rule->link);
+ tbl->rule_cnt--;
+ if (rule->hdr)
+ __ipa_release_hdr(rule->hdr->id);
+ else if (rule->proc_ctx)
+ __ipa_release_hdr_proc_ctx(rule->proc_ctx->id);
+ rule->cookie = 0;
+ id = rule->id;
+ kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
+
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+ }
+
+ if (ipa_id_find(tbl->id) == NULL) {
+ WARN_ON(1);
+ mutex_unlock(&ipa_ctx->lock);
+ return -EFAULT;
+ }
+ id = tbl->id;
+
+ /* do not remove the "default" routing tbl which has index 0 */
+ if (tbl->idx != apps_start_idx) {
+ if (!tbl->in_sys) {
+ list_del(&tbl->link);
+ set->tbl_cnt--;
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+ } else {
+ list_move(&tbl->link, &rset->head_rt_tbl_list);
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ set->tbl_cnt--;
+ IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ }
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+
+/**
+ * ipa2_get_rt_tbl() - lookup the specified routing table and return handle
+ * if it exists; on a successful lookup the routing table ref cnt is increased
+ * @lookup: [inout] routing table to lookup and its handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_rt_tbl later if this function succeeds
+ */
+int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+ struct ipa_rt_tbl *entry;
+ int result = -EFAULT;
+
+ if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
+ if (entry && entry->cookie == IPA_COOKIE) {
+ entry->ref_cnt++;
+ lookup->hdl = entry->id;
+
+ /* commit for get */
+ if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
+ IPAERR("fail to commit RT tbl\n");
+
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa2_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl: [in] the routing table handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
+{
+ struct ipa_rt_tbl *entry;
+ enum ipa_ip_type ip = IPA_IP_MAX;
+ int result;
+
+ mutex_lock(&ipa_ctx->lock);
+ entry = ipa_id_find(rt_tbl_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ result = -EINVAL;
+ goto ret;
+ }
+
+ if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR("bad parms\n");
+ result = -EINVAL;
+ goto ret;
+ }
+
+ if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ entry->ref_cnt--;
+ if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry))
+ IPAERR("fail to del RT tbl\n");
+ /* commit for put */
+ if (ipa_ctx->ctrl->ipa_commit_rt(ip))
+ IPAERR("fail to commit RT tbl\n");
+ }
+
+ result = 0;
+
+ret:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+
+
+static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
+{
+ struct ipa_rt_entry *entry;
+ struct ipa_hdr_entry *hdr = NULL;
+
+ if (rtrule->rule.hdr_hdl) {
+ hdr = ipa_id_find(rtrule->rule.hdr_hdl);
+ if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid hdr\n");
+ goto error;
+ }
+ }
+
+ entry = ipa_id_find(rtrule->rt_rule_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ goto error;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("bad params\n");
+ goto error;
+ }
+
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+
+ entry->rule = rtrule->rule;
+ entry->hdr = hdr;
+
+ if (entry->hdr)
+ entry->hdr->ref_cnt++;
+
+ return 0;
+
+error:
+ return -EPERM;
+}
+
+/**
+ * ipa2_mdfy_rt_rule() - Modify the specified routing rules in SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of routing rules to modify
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
+{
+ int i;
+ int result;
+
+ if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_rules; i++) {
+ if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
+ IPAERR("failed to mdfy rt rule %i\n", i);
+ hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
+ } else {
+ hdls->rules[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
new file mode 100644
index 0000000..a03a49a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ipa
+#define TRACE_INCLUDE_FILE ipa_trace
+
+#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+ intr_to_poll,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ poll_to_intr,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ idle_sleep_enter,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ idle_sleep_exit,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netifni,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netifrx,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netif_rcv_skb,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+#endif /* _IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
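Each TRACE_EVENT() above expands into a trace_<name>() static inline for
callers. A usage sketch, assuming exactly one .c file instantiates the
tracepoints (the variable name is hypothetical):

	/* in exactly one compilation unit of the driver: */
	#define CREATE_TRACE_POINTS
	#include "ipa_trace.h"

	/* at the interrupt-to-polling transition in the data path: */
	trace_intr_to_poll(ep_client_id);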
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
new file mode 100644
index 0000000..01eea36
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -0,0 +1,923 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/delay.h>
+
+#define IPA_RAM_UC_SMEM_SIZE 128
+#define IPA_HW_INTERFACE_VERSION 0x0111
+#define IPA_PKT_FLUSH_TO_US 100
+#define IPA_UC_POLL_SLEEP_USEC 100
+#define IPA_UC_POLL_MAX_RETRY 10000
+#define HOLB_WORKQUEUE_NAME "ipa_holb_wq"
+
+static struct workqueue_struct *ipa_holb_wq;
+static void ipa_start_monitor_holb(struct work_struct *work);
+static DECLARE_WORK(ipa_holb_work, ipa_start_monitor_holb);
+
+/**
+ * enum ipa_cpu_2_hw_commands - Values that represent the commands from the CPU
+ * @IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
+ * @IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which define the behavior
+ *                                  of HW.
+ * @IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW.
+ * @IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information.
+ * @IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal
+ *                               handling.
+ * @IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to go to the Clock Gated
+ *                              state.
+ * @IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to go to the Clock Ungated
+ *                                state.
+ * @IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB.
+ * @IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
+ * @IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING : Enable or disable HOLB
+ *                                            monitoring on a given pipe.
+enum ipa_cpu_2_hw_commands {
+ IPA_CPU_2_HW_CMD_NO_OP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_CPU_2_HW_CMD_UPDATE_FLAGS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_CPU_2_HW_CMD_DEBUG_GET_INFO =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+ IPA_CPU_2_HW_CMD_ERR_FATAL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+ IPA_CPU_2_HW_CMD_CLK_GATE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+ IPA_CPU_2_HW_CMD_CLK_UNGATE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+ IPA_CPU_2_HW_CMD_MEMCPY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+ IPA_CPU_2_HW_CMD_RESET_PIPE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
+ IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
+};
+
+/**
+ * enum ipa_hw_2_cpu_responses - Values that represent common HW responses
+ * to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once
+ * boot sequence is completed and HW is ready to serve commands from CPU
+ * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands
+ */
+enum ipa_hw_2_cpu_responses {
+ IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
+ * device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ */
+enum ipa_hw_2_cpu_events {
+ IPA_HW_2_CPU_EVENT_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_2_CPU_EVENT_LOG_INFO =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error persists
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_ZIP_ENGINE_ERROR : ZIP engine error
+ */
+enum ipa_hw_errors {
+ IPA_HW_ERROR_NONE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_INVALID_DOORBELL_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_DMA_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_HW_FATAL_SYSTEM_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+ IPA_HW_INVALID_OPCODE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+ IPA_HW_ZIP_ENGINE_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5)
+};
+
+/**
+ * struct IpaHwResetPipeCmdData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_MEMCPY command.
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+struct IpaHwMemCopyData_t {
+ u32 destination_addr;
+ u32 source_addr;
+ u32 dest_buffer_size;
+ u32 source_buffer_size;
+};
+
+/**
+ * union IpaHwResetPipeCmdData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_RESET_PIPE command.
+ * @pipeNum : Pipe number to be reset
+ * @direction : 1 - IPA Producer, 0 - IPA Consumer
+ * @reserved_02_03 : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwResetPipeCmdData_t {
+ struct IpaHwResetPipeCmdParams_t {
+ u8 pipeNum;
+ u8 direction;
+ u32 reserved_02_03;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwmonitorHolbCmdData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING command.
+ * @monitorPipe : Indication whether to monitor the pipe. 0 - do not monitor
+ * the pipe, 1 - monitor the pipe
+ * @pipeNum : Pipe to be monitored/not monitored
+ * @reserved_02_03 : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwmonitorHolbCmdData_t {
+ struct IpaHwmonitorHolbCmdParams_t {
+ u8 monitorPipe;
+ u8 pipeNum;
+ u32 reserved_02_03:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
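These command unions are filled through .params and handed to the uC as the
single 32-bit .raw32b immediate. A minimal sketch for the HOLB-monitoring
command (the pipe number is hypothetical):

	union IpaHwmonitorHolbCmdData_t cmd;

	cmd.raw32b = 0;
	cmd.params.monitorPipe = 1;	/* 1 - start monitoring this pipe */
	cmd.params.pipeNum = 5;		/* hypothetical pipe index */
	/* cmd.raw32b now carries the immediate parameter word */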
+
+
+/**
+ * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters
+ * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response.
+ * @originalCmdOp : The original command opcode
+ * @status : 0 for success indication, otherwise failure
+ * @reserved : Reserved
+ *
+ * Parameters are sent as 32b immediate parameters.
+ */
+union IpaHwCpuCmdCompletedResponseData_t {
+ struct IpaHwCpuCmdCompletedResponseParams_t {
+ u32 originalCmdOp:8;
+ u32 status:8;
+ u32 reserved:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
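Responses travel the opposite direction: a 32-bit word read from shared
memory is decoded through the same union pattern. A sketch, assuming
resp_word was read from the uC response area:

	union IpaHwCpuCmdCompletedResponseData_t rsp;

	rsp.raw32b = resp_word;
	if (rsp.params.originalCmdOp != sent_op || rsp.params.status != 0)
		/* the uC failed or answered a different command */;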
+
+/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+ struct IpaHwErrorEventParams_t {
+ u32 errorType:8;
+ u32 reserved:24;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command
+ * @newFlags: SW flags defined the behavior of HW.
+ * This field is expected to be used as bitmask for enum ipa_hw_flags
+ */
+union IpaHwUpdateFlagsCmdData_t {
+ struct IpaHwUpdateFlagsCmdParams_t {
+ u32 newFlags;
+ } params;
+ u32 raw32b;
+};
+
+struct ipa_uc_hdlrs uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } };
+
+static inline const char *ipa_hw_error_str(enum ipa_hw_errors err_type)
+{
+ const char *str;
+
+ switch (err_type) {
+ case IPA_HW_ERROR_NONE:
+ str = "IPA_HW_ERROR_NONE";
+ break;
+ case IPA_HW_INVALID_DOORBELL_ERROR:
+ str = "IPA_HW_INVALID_DOORBELL_ERROR";
+ break;
+ case IPA_HW_FATAL_SYSTEM_ERROR:
+ str = "IPA_HW_FATAL_SYSTEM_ERROR";
+ break;
+ case IPA_HW_INVALID_OPCODE:
+ str = "IPA_HW_INVALID_OPCODE";
+ break;
+ case IPA_HW_ZIP_ENGINE_ERROR:
+ str = "IPA_HW_ZIP_ENGINE_ERROR";
+ break;
+ default:
+ str = "INVALID ipa_hw_errors type";
+ }
+
+ return str;
+}
+
+static void ipa_log_evt_hdlr(void)
+{
+ int i;
+
+ if (!ipa_ctx->uc_ctx.uc_event_top_ofst) {
+ ipa_ctx->uc_ctx.uc_event_top_ofst =
+ ipa_ctx->uc_ctx.uc_sram_mmio->eventParams;
+ if (ipa_ctx->uc_ctx.uc_event_top_ofst +
+ sizeof(struct IpaHwEventLogInfoData_t) >=
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+ ipa_ctx->smem_sz) {
+ IPAERR("uc_top 0x%x outside SRAM\n",
+ ipa_ctx->uc_ctx.uc_event_top_ofst);
+ goto bad_uc_top_ofst;
+ }
+
+ ipa_ctx->uc_ctx.uc_event_top_mmio = ioremap(
+ ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->uc_ctx.uc_event_top_ofst,
+ sizeof(struct IpaHwEventLogInfoData_t));
+ if (!ipa_ctx->uc_ctx.uc_event_top_mmio) {
+ IPAERR("fail to ioremap uc top\n");
+ goto bad_uc_top_ofst;
+ }
+
+ for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+ if (uc_hdlrs[i].ipa_uc_event_log_info_hdlr)
+ uc_hdlrs[i].ipa_uc_event_log_info_hdlr
+ (ipa_ctx->uc_ctx.uc_event_top_mmio);
+ }
+	} else {
+ if (ipa_ctx->uc_ctx.uc_sram_mmio->eventParams !=
+ ipa_ctx->uc_ctx.uc_event_top_ofst) {
+ IPAERR("uc top ofst changed new=%u cur=%u\n",
+ ipa_ctx->uc_ctx.uc_sram_mmio->
+ eventParams,
+ ipa_ctx->uc_ctx.uc_event_top_ofst);
+ }
+ }
+
+ return;
+
+bad_uc_top_ofst:
+ ipa_ctx->uc_ctx.uc_event_top_ofst = 0;
+}
+
+/**
+ * ipa2_uc_state_check() - Check the status of the uC interface
+ *
+ * Return value: 0 if the uC is loaded, interface is initialized
+ * and there was no recent failure in one of the commands.
+ * A negative value is returned otherwise.
+ */
+int ipa2_uc_state_check(void)
+{
+ if (!ipa_ctx->uc_ctx.uc_inited) {
+ IPAERR("uC interface not initialized\n");
+ return -EFAULT;
+ }
+
+ if (!ipa_ctx->uc_ctx.uc_loaded) {
+ IPAERR("uC is not loaded\n");
+ return -EFAULT;
+ }
+
+ if (ipa_ctx->uc_ctx.uc_failed) {
+ IPAERR("uC has failed its last command\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa2_uc_state_check);
+
+/**
+ * ipa_uc_loaded_check() - Check the uC has been loaded
+ *
+ * Return value: 1 if the uC is loaded, 0 otherwise
+ */
+int ipa_uc_loaded_check(void)
+{
+ return ipa_ctx->uc_ctx.uc_loaded;
+}
+EXPORT_SYMBOL(ipa_uc_loaded_check);
+
+static void ipa_uc_event_handler(enum ipa_irq_type interrupt,
+ void *private_data,
+ void *interrupt_data)
+{
+ union IpaHwErrorEventData_t evt;
+ u8 feature;
+
+ WARN_ON(private_data != ipa_ctx);
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ IPADBG("uC evt opcode=%u\n",
+ ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+ feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+	if (feature >= IPA_HW_FEATURE_MAX) {
+ IPAERR("Invalid feature %u for event %u\n",
+ feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return;
+ }
+ /* Feature specific handling */
+ if (uc_hdlrs[feature].ipa_uc_event_hdlr)
+ uc_hdlrs[feature].ipa_uc_event_hdlr
+ (ipa_ctx->uc_ctx.uc_sram_mmio);
+
+ /* General handling */
+ if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_ERROR) {
+ evt.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->eventParams;
+ IPAERR("uC Error, evt errorType = %s\n",
+ ipa_hw_error_str(evt.params.errorType));
+ ipa_ctx->uc_ctx.uc_failed = true;
+ ipa_ctx->uc_ctx.uc_error_type = evt.params.errorType;
+ if (evt.params.errorType == IPA_HW_ZIP_ENGINE_ERROR) {
+ IPAERR("IPA has encountered a ZIP engine error\n");
+ ipa_ctx->uc_ctx.uc_zip_error = true;
+ }
+ BUG();
+ } else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_LOG_INFO) {
+ IPADBG("uC evt log info ofst=0x%x\n",
+ ipa_ctx->uc_ctx.uc_sram_mmio->eventParams);
+ ipa_log_evt_hdlr();
+ } else {
+ IPADBG("unsupported uC evt opcode=%u\n",
+ ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static int ipa_uc_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ int result = 0;
+ struct ipa_active_client_logging_info log_info;
+
+ IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
+
+ result = ipa2_uc_state_check();
+ if (result)
+ goto fail;
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+ if (ipa2_inc_client_enable_clks_no_block(&log_info))
+ goto fail;
+
+ ipa_ctx->uc_ctx.uc_sram_mmio->cmdOp =
+ IPA_CPU_2_HW_CMD_ERR_FATAL;
+ /* ensure write to shared memory is done before triggering uc */
+ wmb();
+ ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1);
+ /* give uc enough time to save state */
+ udelay(IPA_PKT_FLUSH_TO_US);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPADBG("err_fatal issued\n");
+
+fail:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa_uc_panic_blk = {
+ .notifier_call = ipa_uc_panic_notifier,
+};
+
+void ipa_register_panic_hdlr(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ipa_uc_panic_blk);
+}
+
+static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt,
+ void *private_data,
+ void *interrupt_data)
+{
+ union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+ u8 feature;
+ int res;
+ int i;
+
+ WARN_ON(private_data != ipa_ctx);
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPADBG("uC rsp opcode=%u\n",
+ ipa_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+ feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+	if (feature >= IPA_HW_FEATURE_MAX) {
+		IPAERR("Invalid feature %u for response %u\n",
+			feature, ipa_ctx->uc_ctx.uc_sram_mmio->responseOp);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return;
+ }
+
+ /* Feature specific handling */
+ if (uc_hdlrs[feature].ipa_uc_response_hdlr) {
+ res = uc_hdlrs[feature].ipa_uc_response_hdlr(
+ ipa_ctx->uc_ctx.uc_sram_mmio,
+ &ipa_ctx->uc_ctx.uc_status);
+ if (res == 0) {
+ IPADBG("feature %d specific response handler\n",
+ feature);
+ complete_all(&ipa_ctx->uc_ctx.uc_completion);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return;
+ }
+ }
+
+ /* General handling */
+ if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+ IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) {
+ ipa_ctx->uc_ctx.uc_loaded = true;
+ IPAERR("IPA uC loaded\n");
+ /*
+ * The proxy vote is held until uC is loaded to ensure that
+ * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received.
+ */
+ ipa2_proxy_clk_unvote();
+ for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+ if (uc_hdlrs[i].ipa_uc_loaded_hdlr)
+ uc_hdlrs[i].ipa_uc_loaded_hdlr();
+ }
+ /* Queue the work to enable holb monitoring on IPA-USB Producer
+ * pipe if valid.
+ */
+ if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L)
+ queue_work(ipa_holb_wq, &ipa_holb_work);
+ } else if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+ IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+ uc_rsp.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->responseParams;
+ IPADBG("uC cmd response opcode=%u status=%u\n",
+ uc_rsp.params.originalCmdOp,
+ uc_rsp.params.status);
+ if (uc_rsp.params.originalCmdOp ==
+ ipa_ctx->uc_ctx.pending_cmd) {
+ ipa_ctx->uc_ctx.uc_status = uc_rsp.params.status;
+ complete_all(&ipa_ctx->uc_ctx.uc_completion);
+ } else {
+ IPAERR("Expected cmd=%u rcvd cmd=%u\n",
+ ipa_ctx->uc_ctx.pending_cmd,
+ uc_rsp.params.originalCmdOp);
+ }
+ } else {
+ IPAERR("Unsupported uC rsp opcode = %u\n",
+ ipa_ctx->uc_ctx.uc_sram_mmio->responseOp);
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa_uc_interface_init() - Initialize the interface with the uC
+ *
+ * Return value: 0 on success, negative value otherwise
+ */
+int ipa_uc_interface_init(void)
+{
+ int result;
+ unsigned long phys_addr;
+
+ if (ipa_ctx->uc_ctx.uc_inited) {
+ IPADBG("uC interface already initialized\n");
+ return 0;
+ }
+
+ ipa_holb_wq = create_singlethread_workqueue(
+ HOLB_WORKQUEUE_NAME);
+ if (!ipa_holb_wq) {
+ IPAERR("HOLB workqueue creation failed\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ipa_ctx->uc_ctx.uc_lock);
+
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+ phys_addr = ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_SW_FIRST_v2_5;
+ } else {
+ phys_addr = ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(
+ ipa_ctx->smem_restricted_bytes / 4);
+ }
+
+ ipa_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr,
+ IPA_RAM_UC_SMEM_SIZE);
+ if (!ipa_ctx->uc_ctx.uc_sram_mmio) {
+ IPAERR("Fail to ioremap IPA uC SRAM\n");
+ result = -ENOMEM;
+ goto remap_fail;
+ }
+
+ result = ipa2_add_interrupt_handler(IPA_UC_IRQ_0,
+ ipa_uc_event_handler, true,
+ ipa_ctx);
+ if (result) {
+ IPAERR("Fail to register for UC_IRQ0 rsp interrupt\n");
+ result = -EFAULT;
+ goto irq_fail0;
+ }
+
+ result = ipa2_add_interrupt_handler(IPA_UC_IRQ_1,
+ ipa_uc_response_hdlr, true,
+ ipa_ctx);
+ if (result) {
+ IPAERR("fail to register for UC_IRQ1 rsp interrupt\n");
+ result = -EFAULT;
+ goto irq_fail1;
+ }
+
+ ipa_ctx->uc_ctx.uc_inited = true;
+
+ IPADBG("IPA uC interface is initialized\n");
+ return 0;
+
+irq_fail1:
+ ipa2_remove_interrupt_handler(IPA_UC_IRQ_0);
+irq_fail0:
+ iounmap(ipa_ctx->uc_ctx.uc_sram_mmio);
+remap_fail:
+ return result;
+}
+EXPORT_SYMBOL(ipa_uc_interface_init);
+
+/**
+ * ipa_uc_send_cmd() - Send a command to the uC
+ *
+ * Note: In case the operation times out (no response from the uC) or
+ * the maximal number of polling retries is reached, the logic
+ * considers it an invalid state of the uC/IPA, and
+ * issues a kernel panic.
+ *
+ * Returns: 0 on success.
+ * -EBADF in case the uC interface is not initialized
+ * or the uC has failed previously.
+ * -EFAULT in case the received status doesn't match
+ * the expected one.
+ */
+int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+ bool polling_mode, unsigned long timeout_jiffies)
+{
+ int index;
+ union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+
+ mutex_lock(&ipa_ctx->uc_ctx.uc_lock);
+
+ if (ipa2_uc_state_check()) {
+ IPADBG("uC send command aborted\n");
+ mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+ return -EBADF;
+ }
+
+ init_completion(&ipa_ctx->uc_ctx.uc_completion);
+
+ ipa_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd;
+ ipa_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode;
+ ipa_ctx->uc_ctx.pending_cmd = opcode;
+
+ ipa_ctx->uc_ctx.uc_sram_mmio->responseOp = 0;
+ ipa_ctx->uc_ctx.uc_sram_mmio->responseParams = 0;
+
+ ipa_ctx->uc_ctx.uc_status = 0;
+
+ /* ensure write to shared memory is done before triggering uc */
+ wmb();
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1);
+
+ if (polling_mode) {
+ for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) {
+ if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+ IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+ uc_rsp.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->
+ responseParams;
+ if (uc_rsp.params.originalCmdOp ==
+ ipa_ctx->uc_ctx.pending_cmd) {
+ ipa_ctx->uc_ctx.pending_cmd = -1;
+ break;
+ }
+ }
+ usleep_range(IPA_UC_POLL_SLEEP_USEC,
+ IPA_UC_POLL_SLEEP_USEC);
+ }
+
+ if (index == IPA_UC_POLL_MAX_RETRY) {
+ IPAERR("uC max polling retries reached\n");
+ if (ipa_ctx->uc_ctx.uc_failed) {
+ IPAERR("uC reported on Error, errorType = %s\n",
+ ipa_hw_error_str(ipa_ctx->
+ uc_ctx.uc_error_type));
+ }
+ mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+ BUG();
+ return -EFAULT;
+ }
+ } else {
+ if (wait_for_completion_timeout(&ipa_ctx->uc_ctx.uc_completion,
+ timeout_jiffies) == 0) {
+ IPAERR("uC timed out\n");
+ if (ipa_ctx->uc_ctx.uc_failed) {
+ IPAERR("uC reported on Error, errorType = %s\n",
+ ipa_hw_error_str(ipa_ctx->
+ uc_ctx.uc_error_type));
+ }
+ mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+ BUG();
+ return -EFAULT;
+ }
+ }
+
+ if (ipa_ctx->uc_ctx.uc_status != expected_status) {
+ IPAERR("Recevied status %u, Expected status %u\n",
+ ipa_ctx->uc_ctx.uc_status, expected_status);
+ ipa_ctx->uc_ctx.pending_cmd = -1;
+ mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+ return -EFAULT;
+ }
+
+ ipa_ctx->uc_ctx.pending_cmd = -1;
+ mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+
+ IPADBG("uC cmd %u send succeeded\n", opcode);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_uc_send_cmd);
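+
+/*
+ * Illustrative sketch (example only; example_* names are hypothetical):
+ * sending a command whose parameters fit in the 32-bit immediate word.
+ * Interrupt-driven callers pass a jiffies timeout; polling callers such
+ * as ipa_uc_notify_clk_state() below pass polling_mode=true, in which
+ * case the timeout argument is not used.
+ */
+#if 0	/* example only */
+static int example_send_reset(u8 pipe_num)
+{
+	union IpaHwResetPipeCmdData_t cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.pipeNum = pipe_num;
+	/* wait up to 10 seconds for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED */
+	return ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE,
+		0, false, 10*HZ);
+}
+#endif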
+
+/**
+ * ipa_uc_register_handlers() - Registers event, response and log event
+ * handlers for a specific feature. Please note
+ * that currently only one handler can be
+ * registered per feature.
+ *
+ * Return value: None
+ */
+void ipa_uc_register_handlers(enum ipa_hw_features feature,
+ struct ipa_uc_hdlrs *hdlrs)
+{
+ if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+ IPAERR("Feature %u is invalid, not registering hdlrs\n",
+ feature);
+ return;
+ }
+
+ mutex_lock(&ipa_ctx->uc_ctx.uc_lock);
+ uc_hdlrs[feature] = *hdlrs;
+ mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+
+ IPADBG("uC handlers registered for feature %u\n", feature);
+}
+EXPORT_SYMBOL(ipa_uc_register_handlers);
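+
+/*
+ * Illustrative sketch (example only; the example_* callback is
+ * hypothetical): a feature module registers its callbacks once, as
+ * ipa2_uc_mhi_init() in ipa_uc_mhi.c does. Callbacks that are not
+ * needed are simply left NULL.
+ */
+#if 0	/* example only */
+static void example_loaded_cb(void)
+{
+	/* uC firmware is up; safe to send feature commands from here */
+}
+
+static void example_register_feature(void)
+{
+	struct ipa_uc_hdlrs hdlrs;
+
+	memset(&hdlrs, 0, sizeof(hdlrs));
+	hdlrs.ipa_uc_loaded_hdlr = example_loaded_cb;
+	ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
+}
+#endif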
+
+/**
+ * ipa_uc_reset_pipe() - reset a BAM pipe using the uC interface
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * The function uses the uC interface in order to issue a BAM
+ * PIPE reset request. The uC makes sure there's no traffic in
+ * the TX command queue before issuing the reset.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_reset_pipe(enum ipa_client_type ipa_client)
+{
+ union IpaHwResetPipeCmdData_t cmd;
+ int ep_idx;
+ int ret;
+
+ ep_idx = ipa2_get_ep_mapping(ipa_client);
+ if (ep_idx == -1) {
+ IPAERR("Invalid IPA client\n");
+ return 0;
+ }
+
+ /*
+ * If the uC interface has not been initialized yet,
+ * continue with the sequence without resetting the
+ * pipe.
+ */
+ if (ipa2_uc_state_check()) {
+ IPADBG("uC interface will not be used to reset %s pipe %d\n",
+ IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
+ ep_idx);
+ return 0;
+ }
+
+ /*
+ * IPA consumer = 0, IPA producer = 1.
+ * IPA driver concept of PROD/CONS is the opposite of the
+ * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER,
+ * and vice-versa.
+ */
+ cmd.params.direction = (u8)(IPA_CLIENT_IS_PROD(ipa_client) ? 0 : 1);
+ cmd.params.pipeNum = (u8)ep_idx;
+
+ IPADBG("uC pipe reset on IPA %s pipe %d\n",
+ IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx);
+
+ ret = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0,
+ false, 10*HZ);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_reset_pipe);
+
+/**
+ * ipa_uc_monitor_holb() - Enable/Disable holb monitoring of a producer pipe.
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * The function uses the uC interface in order to disable/enable holb
+ * monitoring.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable)
+{
+ union IpaHwmonitorHolbCmdData_t cmd;
+ int ep_idx;
+ int ret;
+
+ /* HOLB monitoring is applicable only to 2.6L. */
+ if (ipa_ctx->ipa_hw_type != IPA_HW_v2_6L) {
+ IPADBG("Not applicable on this target\n");
+ return 0;
+ }
+
+ ep_idx = ipa2_get_ep_mapping(ipa_client);
+ if (ep_idx == -1) {
+ IPAERR("Invalid IPA client\n");
+ return 0;
+ }
+
+ /*
+ * If the uC interface has not been initialized yet,
+ * continue with the sequence without resetting the
+ * pipe.
+ */
+ if (ipa2_uc_state_check()) {
+ IPADBG("uC interface will not be used to reset %s pipe %d\n",
+ IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
+ ep_idx);
+ return 0;
+ }
+
+ /*
+ * IPA consumer = 0, IPA producer = 1.
+ * IPA driver concept of PROD/CONS is the opposite of the
+ * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER,
+ * and vice-versa.
+ */
+ cmd.params.monitorPipe = (u8)(enable ? 1 : 0);
+ cmd.params.pipeNum = (u8)ep_idx;
+
+ IPADBG("uC holb monitoring on IPA pipe %d, Enable: %d\n",
+ ep_idx, enable);
+
+ ret = ipa_uc_send_cmd(cmd.raw32b,
+ IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING, 0,
+ false, 10*HZ);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_monitor_holb);
+
+/**
+ * ipa_start_monitor_holb() - Send HOLB command to monitor IPA-USB
+ * producer pipe.
+ *
+ * This function is called after the uC is loaded to start monitoring
+ * the IPA pipe towards USB in case USB is already connected.
+ *
+ * Return value:
+ * None
+ */
+static void ipa_start_monitor_holb(struct work_struct *work)
+{
+ IPADBG("starting holb monitoring on IPA_CLIENT_USB_CONS\n");
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipa_uc_monitor_holb(IPA_CLIENT_USB_CONS, true);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa_uc_notify_clk_state() - notify to uC of clock enable / disable
+ * @enabled: true if clock are enabled
+ *
+ * The function uses the uC interface to notify the uC before IPA clocks
+ * are disabled, to make sure the uC is not in the middle of an operation.
+ * Also, after clocks are enabled, the uC needs to be notified to start
+ * processing.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_notify_clk_state(bool enabled)
+{
+ u32 opcode;
+
+ /*
+ * If the uC interface has not been initialized yet,
+ * don't notify the uC on the enable/disable
+ */
+ if (ipa2_uc_state_check()) {
+ IPADBG("uC interface will not notify the UC on clock state\n");
+ return 0;
+ }
+
+ IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE");
+
+ opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE :
+ IPA_CPU_2_HW_CMD_CLK_GATE;
+
+ return ipa_uc_send_cmd(0, opcode, 0, true, 0);
+}
+EXPORT_SYMBOL(ipa_uc_notify_clk_state);
+
+/**
+ * ipa_uc_update_hw_flags() - send uC the HW flags to be used
+ * @flags: This field is expected to be used as bitmask for enum ipa_hw_flags
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_update_hw_flags(u32 flags)
+{
+ union IpaHwUpdateFlagsCmdData_t cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.newFlags = flags;
+ return ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0,
+ false, HZ);
+}
+EXPORT_SYMBOL(ipa_uc_update_hw_flags);
+
+/**
+ * ipa_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+ int res;
+ struct ipa_mem_buffer mem;
+ struct IpaHwMemCopyData_t *cmd;
+
+ IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len);
+	mem.size = sizeof(*cmd);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+ cmd = (struct IpaHwMemCopyData_t *)mem.base;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->destination_addr = dest;
+ cmd->dest_buffer_size = len;
+ cmd->source_addr = src;
+ cmd->source_buffer_size = len;
+ res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0,
+ true, 10 * HZ);
+ if (res) {
+ IPAERR("ipa_uc_send_cmd failed %d\n", res);
+ goto free_coherent;
+ }
+
+ res = 0;
+free_coherent:
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return res;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
new file mode 100644
index 0000000..08d7363
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
@@ -0,0 +1,966 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/* MHI uC interface definitions */
+#define IPA_HW_INTERFACE_MHI_VERSION 0x0004
+
+#define IPA_HW_MAX_NUMBER_OF_CHANNELS 2
+#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS 2
+#define IPA_HW_MAX_CHANNEL_HANDLE (IPA_HW_MAX_NUMBER_OF_CHANNELS-1)
+
+/**
+ * Values that represent the MHI commands from CPU to IPA HW.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing.
+ * Once operation was completed HW shall respond with
+ * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready
+ *	to serve MHI transfers. Once initialization was completed HW shall
+ *	respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
+ *	(IPA_HW_MHI_CHANNEL_STATE_ENABLE).
+ * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data.
+ * Once operation was completed HW shall respond with
+ * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel
+ * processing state following host request. Once operation was completed
+ * HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE.
+ * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL synchronization.
+ * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing.
+ */
+enum ipa_cpu_2_hw_mhi_commands {
+ IPA_CPU_2_HW_CMD_MHI_INIT
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+ IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+ IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+ IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3),
+ IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+ IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5)
+};
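+
+/*
+ * Illustrative bring-up order (example only; addresses and indices are
+ * hypothetical), as implemented by the functions further down in this
+ * file: MHI_INIT and MHI_UPDATE_MSI are issued by
+ * ipa_uc_mhi_init_engine(), MHI_INIT_CHANNEL by ipa_uc_mhi_init_channel(),
+ * and suspend/resume/reset all go through MHI_CHANGE_CHANNEL_STATE.
+ */
+#if 0	/* example only */
+static int example_mhi_bringup(struct ipa_mhi_msi_info *msi)
+{
+	int res;
+
+	res = ipa_uc_mhi_init_engine(msi, 0x1000, 0x2000, 0x3000, 0, 0);
+	if (res)
+		return res;
+	/* ep index 0, channel handle 0, context array index 0, direction 0 */
+	return ipa_uc_mhi_init_channel(0, 0, 0, 0);
+}
+#endif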
+
+/**
+ * Values that represent MHI related HW responses to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to
+ * IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or
+ * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands.
+ */
+enum ipa_hw_2_cpu_mhi_responses {
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+};
+
+/**
+ * Values that represent MHI related HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specify the device detected an
+ * error in an element from the transfer ring associated with the channel
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specify a bam
+ * interrupt was asserted when MHI engine is suspended
+ */
+enum ipa_hw_2_cpu_mhi_events {
+ IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+ IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+};
+
+/**
+ * Channel error types.
+ * @IPA_HW_CHANNEL_ERROR_NONE: No error.
+ * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected
+ */
+enum ipa_hw_channel_errors {
+ IPA_HW_CHANNEL_ERROR_NONE,
+ IPA_HW_CHANNEL_INVALID_RE_ERROR
+};
+
+/**
+ * MHI error types.
+ * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space
+ * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array
+ * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array
+ * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on
+ * secondary event ring
+ * @IPA_HW_LINK_ERROR: Link error
+ */
+enum ipa_hw_mhi_errors {
+ IPA_HW_INVALID_MMIO_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+ IPA_HW_INVALID_CHANNEL_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+ IPA_HW_INVALID_EVENT_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+ IPA_HW_NO_ED_IN_RING_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+ IPA_HW_LINK_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5),
+};
+
+/**
+ * Structure referring to the common and MHI section of 128B shared memory
+ * located in offset zero of SW Partition in IPA SRAM.
+ * The shared memory is used for communication between IPA HW and CPU.
+ * @common: common section in IPA SRAM
+ * @interfaceVersionMhi: The MHI interface version as reported by HW
+ * @mhiState: Overall MHI state
+ * @reserved_2B: reserved
+ * @mhiCnl0State: State of MHI channel 0.
+ * The state carries information regarding the error type.
+ * See IPA_HW_MHI_CHANNEL_STATES.
+ * @mhiCnl1State: State of MHI channel 1.
+ * @mhiCnl2State: State of MHI channel 2.
+ * @mhiCnl3State: State of MHI channel 3.
+ * @mhiCnl4State: State of MHI channel 4.
+ * @mhiCnl5State: State of MHI channel 5.
+ * @mhiCnl6State: State of MHI channel 6.
+ * @mhiCnl7State: State of MHI channel 7.
+ * @reserved_37_34: reserved
+ * @reserved_3B_38: reserved
+ * @reserved_3F_3C: reserved
+ */
+struct IpaHwSharedMemMhiMapping_t {
+ struct IpaHwSharedMemCommonMapping_t common;
+ u16 interfaceVersionMhi;
+ u8 mhiState;
+ u8 reserved_2B;
+ u8 mhiCnl0State;
+ u8 mhiCnl1State;
+ u8 mhiCnl2State;
+ u8 mhiCnl3State;
+ u8 mhiCnl4State;
+ u8 mhiCnl5State;
+ u8 mhiCnl6State;
+ u8 mhiCnl7State;
+ u32 reserved_37_34;
+ u32 reserved_3B_38;
+ u32 reserved_3F_3C;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command.
+ * Parameters are sent as a pointer, thus they should reside at an address
+ * accessible to the HW.
+ * @msiAddress: The MSI base (in device space) used for asserting the interrupt
+ * (MSI) associated with the event ring
+ * @mmioBaseAddress: The address (in device space) of the MMIO structure in
+ * host space
+ * @deviceMhiCtrlBaseAddress: Base address of the memory region in the device
+ * address space where the MHI control data structures are allocated by
+ * the host, including channel context array, event context array,
+ * and rings. This value is used for host/device address translation.
+ * @deviceMhiDataBaseAddress: Base address of the memory region in the device
+ * address space where the MHI data buffers are allocated by the host.
+ * This value is used for host/device address translation.
+ * @firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel.
+ * @firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this
+ * event ring.
+ */
+struct IpaHwMhiInitCmdData_t {
+ u32 msiAddress;
+ u32 mmioBaseAddress;
+ u32 deviceMhiCtrlBaseAddress;
+ u32 deviceMhiDataBaseAddress;
+ u32 firstChannelIndex;
+ u32 firstEventRingIndex;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+ * command. Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is
+ * used as an index in channel context array structures.
+ * @bamPipeId: The BAM pipe number for pipe dedicated for this channel
+ * @channelDirection: The direction of the channel as defined in the channel
+ * type field (CHTYPE) in the channel context data structure.
+ * @reserved: reserved.
+ */
+union IpaHwMhiInitChannelCmdData_t {
+ struct IpaHwMhiInitChannelCmdParams_t {
+ u32 channelHandle:8;
+ u32 contexArrayIndex:8;
+ u32 bamPipeId:6;
+ u32 channelDirection:2;
+ u32 reserved:8;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command.
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ * the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ * the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwMhiMsiCmdData_t {
+ u32 msiAddress_low;
+ u32 msiAddress_hi;
+ u32 msiMask;
+ u32 msiData;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @requestedState: The requested channel state as was indicated from Host.
+ * Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @LPTransitionRejected: Indication that low power state transition was
+ * rejected
+ * @reserved: reserved
+ */
+union IpaHwMhiChangeChannelStateCmdData_t {
+ struct IpaHwMhiChangeChannelStateCmdParams_t {
+ u32 requestedState:8;
+ u32 channelHandle:8;
+ u32 LPTransitionRejected:8;
+ u32 reserved:8;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiStopEventUpdateData_t {
+ struct IpaHwMhiStopEventUpdateDataParams_t {
+ u32 channelHandle:8;
+ u32 reserved:24;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response.
+ * Parameters are sent as 32b immediate parameters.
+ * @state: The new channel state. In case state is not as requested this is
+ * error indication for the last command
+ * @channelHandle: The channel identifier
+ * @additonalParams: For stop: the number of pending bam descriptors currently
+ * queued
+ */
+union IpaHwMhiChangeChannelStateResponseData_t {
+ struct IpaHwMhiChangeChannelStateResponseParams_t {
+ u32 state:8;
+ u32 channelHandle:8;
+ u32 additonalParams:16;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event.
+ * Parameters are sent as 32b immediate parameters.
+ * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelErrorEventData_t {
+ struct IpaHwMhiChannelErrorEventParams_t {
+ u32 errorType:8;
+ u32 channelHandle:8;
+ u32 reserved:16;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelWakeupEventData_t {
+ struct IpaHwMhiChannelWakeupEventParams_t {
+ u32 channelHandle:8;
+ u32 reserved:24;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the MHI Common statistics
+ * @numULDLSync: Number of times UL activity was triggered due to DL activity
+ * @numULTimerExpired: Number of times the UL Accm Timer expired
+ * @numChEvCtxWpRead: Number of channel/event context write-pointer reads
+ * @reserved: reserved
+ */
+struct IpaHwStatsMhiCmnInfoData_t {
+ u32 numULDLSync;
+ u32 numULTimerExpired;
+ u32 numChEvCtxWpRead;
+ u32 reserved;
+};
+
+/**
+ * Structure holding the MHI Channel statistics
+ * @doorbellInt: The number of doorbell int
+ * @reProccesed: The number of ring elements processed
+ * @bamFifoFull: Number of times Bam Fifo got full
+ * @bamFifoEmpty: Number of times Bam Fifo got empty
+ * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75%
+ * @bamFifoUsageLow: Number of times Bam fifo usage went below 25%
+ * @bamInt: Number of BAM Interrupts
+ * @ringFull: Number of times Transfer Ring got full
+ * @ringEmpty: Number of times Transfer Ring got empty
+ * @ringUsageHigh: Number of times Transfer Ring usage went above 75%
+ * @ringUsageLow: Number of times Transfer Ring usage went below 25%
+ * @delayedMsi: Number of times device triggered MSI to host after
+ * Interrupt Moderation Timer expiry
+ * @immediateMsi: Number of times device triggered MSI to host immediately
+ * @thresholdMsi: Number of times device triggered MSI due to max pending
+ * events threshold reached
+ * @numSuspend: Number of times channel was suspended
+ * @numResume: Number of times channel was resumed
+ * @num_OOB: Number of times we indicated that we are OOB
+ * @num_OOB_timer_expiry: Number of times we indicated that we are OOB
+ * after timer expiry
+ * @num_OOB_moderation_timer_start: Number of times we started timer after
+ * sending OOB and hitting OOB again before we processed threshold
+ * number of packets
+ * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode
+ */
+struct IpaHwStatsMhiCnlInfoData_t {
+ u32 doorbellInt;
+ u32 reProccesed;
+ u32 bamFifoFull;
+ u32 bamFifoEmpty;
+ u32 bamFifoUsageHigh;
+ u32 bamFifoUsageLow;
+ u32 bamInt;
+ u32 ringFull;
+ u32 ringEmpty;
+ u32 ringUsageHigh;
+ u32 ringUsageLow;
+ u32 delayedMsi;
+ u32 immediateMsi;
+ u32 thresholdMsi;
+ u32 numSuspend;
+ u32 numResume;
+ u32 num_OOB;
+ u32 num_OOB_timer_expiry;
+ u32 num_OOB_moderation_timer_start;
+ u32 num_db_mode_evt;
+};
+
+/**
+ * Structure holding the MHI statistics
+ * @mhiCmnStats: Stats pertaining to MHI
+ * @mhiCnlStats: Stats pertaining to each channel
+ */
+struct IpaHwStatsMhiInfoData_t {
+ struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats;
+ struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[
+ IPA_HW_MAX_NUMBER_OF_CHANNELS];
+};
+
+/**
+ * Structure holding the MHI Common Config info
+ * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled
+ * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is
+ * enabled
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+struct IpaHwConfigMhiCmnInfoData_t {
+ u8 isDlUlSyncEnabled;
+ u8 UlAccmVal;
+ u8 ulMsiEventThreshold;
+ u8 dlMsiEventThreshold;
+};
+
+/**
+ * Structure holding the parameters for MSI info data
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ * the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ * the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwConfigMhiMsiInfoData_t {
+ u32 msiAddress_low;
+ u32 msiAddress_hi;
+ u32 msiMask;
+ u32 msiData;
+};
+
+/**
+ * Structure holding the MHI Channel Config info
+ * @transferRingSize: The Transfer Ring size in terms of Ring Elements
+ * @transferRingIndex: The Transfer Ring channel number as defined by host
+ * @eventRingIndex: The Event Ring Index associated with this Transfer Ring
+ * @bamPipeIndex: The BAM Pipe associated with this channel
+ * @isOutChannel: Indication for the direction of channel
+ * @reserved_0: Reserved byte for maintaining 4-byte alignment
+ * @reserved_1: Reserved byte for maintaining 4-byte alignment
+ */
+struct IpaHwConfigMhiCnlInfoData_t {
+ u16 transferRingSize;
+ u8 transferRingIndex;
+ u8 eventRingIndex;
+ u8 bamPipeIndex;
+ u8 isOutChannel;
+ u8 reserved_0;
+ u8 reserved_1;
+};
+
+/**
+ * Structure holding the MHI Event Config info
+ * @msiVec: msi vector to invoke MSI interrupt
+ * @intmodtValue: Interrupt moderation timer (in milliseconds)
+ * @eventRingSize: The Event Ring size in terms of Ring Elements
+ * @eventRingIndex: The Event Ring number as defined by host
+ * @reserved_0: Reserved byte for maintaining 4-byte alignment
+ * @reserved_1: Reserved byte for maintaining 4-byte alignment
+ * @reserved_2: Reserved byte for maintaining 4-byte alignment
+ */
+struct IpaHwConfigMhiEventInfoData_t {
+ u32 msiVec;
+ u16 intmodtValue;
+ u16 eventRingSize;
+ u8 eventRingIndex;
+ u8 reserved_0;
+ u8 reserved_1;
+ u8 reserved_2;
+};
+
+/**
+ * Structure holding the MHI Config info
+ * @mhiCmnCfg: Common Config pertaining to MHI
+ * @mhiMsiCfg: Config pertaining to MSI config
+ * @mhiCnlCfg: Config pertaining to each channel
+ * @mhiEvtCfg: Config pertaining to each event Ring
+ */
+struct IpaHwConfigMhiInfoData_t {
+ struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg;
+ struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg;
+ struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[
+ IPA_HW_MAX_NUMBER_OF_CHANNELS];
+ struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[
+ IPA_HW_MAX_NUMBER_OF_EVENTRINGS];
+};
+
+struct ipa_uc_mhi_ctx {
+ u8 expected_responseOp;
+ u32 expected_responseParams;
+ void (*ready_cb)(void);
+ void (*wakeup_request_cb)(void);
+ u32 mhi_uc_stats_ofst;
+ struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio;
+};
+
+#define PRINT_COMMON_STATS(x) \
+ (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+ #x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x))
+
+#define PRINT_CHANNEL_STATS(ch, x) \
+ (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+ #x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x))
+
+struct ipa_uc_mhi_ctx *ipa_uc_mhi_ctx;
+
+static int ipa_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t
+ *uc_sram_mmio, u32 *uc_status)
+{
+ IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp);
+ if (uc_sram_mmio->responseOp == ipa_uc_mhi_ctx->expected_responseOp &&
+ uc_sram_mmio->responseParams ==
+ ipa_uc_mhi_ctx->expected_responseParams) {
+ *uc_status = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void ipa_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
+ *uc_sram_mmio)
+{
+ if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
+ union IpaHwMhiChannelErrorEventData_t evt;
+
+ IPAERR("Channel error\n");
+ evt.raw32b = uc_sram_mmio->eventParams;
+ IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
+ evt.params.errorType, evt.params.channelHandle,
+ evt.params.reserved);
+ } else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
+ union IpaHwMhiChannelWakeupEventData_t evt;
+
+ IPADBG("WakeUp channel request\n");
+ evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("channelHandle=%d reserved=%d\n",
+ evt.params.channelHandle, evt.params.reserved);
+ ipa_uc_mhi_ctx->wakeup_request_cb();
+ }
+}
+
+static void ipa_uc_mhi_event_log_info_hdlr(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) {
+ IPAERR("MHI feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].
+ params.size != sizeof(struct IpaHwStatsMhiInfoData_t)) {
+ IPAERR("mhi stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct IpaHwStatsMhiInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_MHI].params.size);
+ return;
+ }
+
+ ipa_uc_mhi_ctx->mhi_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_MHI].params.offset;
+ IPAERR("MHI stats ofst=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_ofst);
+ if (ipa_uc_mhi_ctx->mhi_uc_stats_ofst +
+ sizeof(struct IpaHwStatsMhiInfoData_t) >=
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+ ipa_ctx->smem_sz) {
+ IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
+ ipa_uc_mhi_ctx->mhi_uc_stats_ofst);
+ return;
+ }
+
+ ipa_uc_mhi_ctx->mhi_uc_stats_mmio =
+ ioremap(ipa_ctx->ipa_wrapper_base +
+ ipa_uc_mhi_ctx->mhi_uc_stats_ofst,
+ sizeof(struct IpaHwStatsMhiInfoData_t));
+ if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc mhi stats\n");
+ return;
+ }
+}
+
+int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
+{
+ struct ipa_uc_hdlrs hdlrs;
+
+ if (ipa_uc_mhi_ctx) {
+ IPAERR("Already initialized\n");
+ return -EFAULT;
+ }
+
+ ipa_uc_mhi_ctx = kzalloc(sizeof(*ipa_uc_mhi_ctx), GFP_KERNEL);
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("no mem\n");
+ return -ENOMEM;
+ }
+
+ ipa_uc_mhi_ctx->ready_cb = ready_cb;
+ ipa_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;
+
+ memset(&hdlrs, 0, sizeof(hdlrs));
+ hdlrs.ipa_uc_loaded_hdlr = ipa_uc_mhi_ctx->ready_cb;
+ hdlrs.ipa_uc_response_hdlr = ipa_uc_mhi_response_hdlr;
+ hdlrs.ipa_uc_event_hdlr = ipa_uc_mhi_event_hdlr;
+ hdlrs.ipa_uc_event_log_info_hdlr = ipa_uc_mhi_event_log_info_hdlr;
+ ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
+
+ IPADBG("Done\n");
+ return 0;
+}
+
+void ipa2_uc_mhi_cleanup(void)
+{
+ struct ipa_uc_hdlrs null_hdlrs = { 0 };
+
+ IPADBG("Enter\n");
+
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("ipa3_uc_mhi_ctx is not initialized\n");
+ return;
+ }
+ ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs);
+ kfree(ipa_uc_mhi_ctx);
+ ipa_uc_mhi_ctx = NULL;
+
+ IPADBG("Done\n");
+}
+
+int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+ u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+ u32 first_evt_idx)
+{
+ int res;
+ struct ipa_mem_buffer mem;
+ struct IpaHwMhiInitCmdData_t *init_cmd_data;
+ struct IpaHwMhiMsiCmdData_t *msi_cmd;
+
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ res = ipa_uc_update_hw_flags(0);
+ if (res) {
+ IPAERR("ipa_uc_update_hw_flags failed %d\n", res);
+ goto disable_clks;
+ }
+
+ mem.size = sizeof(*init_cmd_data);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ res = -ENOMEM;
+ goto disable_clks;
+ }
+ memset(mem.base, 0, mem.size);
+ init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
+ init_cmd_data->msiAddress = msi->addr_low;
+ init_cmd_data->mmioBaseAddress = mmio_addr;
+ init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
+ init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
+ init_cmd_data->firstChannelIndex = first_ch_idx;
+ init_cmd_data->firstEventRingIndex = first_evt_idx;
+ res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0,
+ false, HZ);
+ if (res) {
+ IPAERR("ipa_uc_send_cmd failed %d\n", res);
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
+ mem.phys_base);
+ goto disable_clks;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+ mem.size = sizeof(*msi_cmd);
+ mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ res = -ENOMEM;
+ goto disable_clks;
+ }
+
+ msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
+ msi_cmd->msiAddress_hi = msi->addr_hi;
+ msi_cmd->msiAddress_low = msi->addr_low;
+ msi_cmd->msiData = msi->data;
+ msi_cmd->msiMask = msi->mask;
+ res = ipa_uc_send_cmd((u32)mem.phys_base,
+ IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa_uc_send_cmd failed %d\n", res);
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
+ mem.phys_base);
+ goto disable_clks;
+ }
+
+ dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+	int contexArrayIndex, int channelDirection)
+{
+ int res;
+ union IpaHwMhiInitChannelCmdData_t init_cmd;
+ union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (ipa_ep_idx < 0 || ipa_ep_idx >= ipa_ctx->ipa_num_pipes) {
+ IPAERR("Invalid ipa_ep_idx.\n");
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&uc_rsp, 0, sizeof(uc_rsp));
+ uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ uc_rsp.params.channelHandle = channelHandle;
+ ipa_uc_mhi_ctx->expected_responseOp =
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+ ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+ memset(&init_cmd, 0, sizeof(init_cmd));
+ init_cmd.params.channelHandle = channelHandle;
+ init_cmd.params.contexArrayIndex = contexArrayIndex;
+ init_cmd.params.bamPipeId = ipa_ep_idx;
+ init_cmd.params.channelDirection = channelDirection;
+
+ res = ipa_uc_send_cmd(init_cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa2_uc_mhi_reset_channel(int channelHandle)
+{
+ union IpaHwMhiChangeChannelStateCmdData_t cmd;
+ union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+ int res;
+
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&uc_rsp, 0, sizeof(uc_rsp));
+ uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+ uc_rsp.params.channelHandle = channelHandle;
+ ipa_uc_mhi_ctx->expected_responseOp =
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+ ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+ cmd.params.channelHandle = channelHandle;
+ res = ipa_uc_send_cmd(cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa2_uc_mhi_suspend_channel(int channelHandle)
+{
+ union IpaHwMhiChangeChannelStateCmdData_t cmd;
+ union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+ int res;
+
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&uc_rsp, 0, sizeof(uc_rsp));
+ uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+ uc_rsp.params.channelHandle = channelHandle;
+ ipa_uc_mhi_ctx->expected_responseOp =
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+ ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+ cmd.params.channelHandle = channelHandle;
+ res = ipa_uc_send_cmd(cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
+{
+ union IpaHwMhiChangeChannelStateCmdData_t cmd;
+ union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+ int res;
+
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&uc_rsp, 0, sizeof(uc_rsp));
+ uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ uc_rsp.params.channelHandle = channelHandle;
+ ipa_uc_mhi_ctx->expected_responseOp =
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+ ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ cmd.params.channelHandle = channelHandle;
+ cmd.params.LPTransitionRejected = LPTransitionRejected;
+ res = ipa_uc_send_cmd(cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa2_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+ union IpaHwMhiStopEventUpdateData_t cmd;
+ int res;
+
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.channelHandle = channelHandle;
+
+ ipa_uc_mhi_ctx->expected_responseOp =
+ IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE;
+ ipa_uc_mhi_ctx->expected_responseParams = cmd.raw32b;
+
+ res = ipa_uc_send_cmd(cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
+{
+ int res;
+
+ if (!ipa_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
+ cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal);
+ IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
+ cmd->params.ulMsiEventThreshold,
+ cmd->params.dlMsiEventThreshold);
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ res = ipa_uc_send_cmd(cmd->raw32b,
+ IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa2_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+ int nBytes = 0;
+ int i;
+
+	if (!ipa_uc_mhi_ctx || !ipa_uc_mhi_ctx->mhi_uc_stats_mmio) {
+ IPAERR("MHI uc stats is not valid\n");
+ return 0;
+ }
+
+ nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+ "Common Stats:\n");
+ PRINT_COMMON_STATS(numULDLSync);
+ PRINT_COMMON_STATS(numULTimerExpired);
+ PRINT_COMMON_STATS(numChEvCtxWpRead);
+
+ for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) {
+ nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+ "Channel %d Stats:\n", i);
+ PRINT_CHANNEL_STATS(i, doorbellInt);
+ PRINT_CHANNEL_STATS(i, reProccesed);
+ PRINT_CHANNEL_STATS(i, bamFifoFull);
+ PRINT_CHANNEL_STATS(i, bamFifoEmpty);
+ PRINT_CHANNEL_STATS(i, bamFifoUsageHigh);
+ PRINT_CHANNEL_STATS(i, bamFifoUsageLow);
+ PRINT_CHANNEL_STATS(i, bamInt);
+ PRINT_CHANNEL_STATS(i, ringFull);
+ PRINT_CHANNEL_STATS(i, ringEmpty);
+ PRINT_CHANNEL_STATS(i, ringUsageHigh);
+ PRINT_CHANNEL_STATS(i, ringUsageLow);
+ PRINT_CHANNEL_STATS(i, delayedMsi);
+ PRINT_CHANNEL_STATS(i, immediateMsi);
+ PRINT_CHANNEL_STATS(i, thresholdMsi);
+ PRINT_CHANNEL_STATS(i, numSuspend);
+ PRINT_CHANNEL_STATS(i, numResume);
+ PRINT_CHANNEL_STATS(i, num_OOB);
+ PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry);
+ PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start);
+ PRINT_CHANNEL_STATS(i, num_db_mode_evt);
+ }
+
+ return nBytes;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
new file mode 100644
index 0000000..08ed47f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -0,0 +1,438 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+static void ipa_uc_ntn_event_handler(
+ struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio)
+{
+ union IpaHwNTNErrorEventData_t ntn_evt;
+
+ if (uc_sram_mmio->eventOp == IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+ ntn_evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+ ntn_evt.params.ntn_error_type,
+ ntn_evt.params.ipa_pipe_number,
+ ntn_evt.params.ntn_ch_err_type);
+ }
+}
+
+static void ipa_uc_ntn_event_log_info_handler(
+ struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+ IPAERR("NTN feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+ params.size != sizeof(struct IpaHwStatsNTNInfoData_t)) {
+ IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct IpaHwStatsNTNInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.size);
+ return;
+ }
+
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+ IPAERR("NTN stats ofst=0x%x\n", ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ if (ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+ sizeof(struct IpaHwStatsNTNInfoData_t) >=
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+ ipa_ctx->smem_sz) {
+ IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ return;
+ }
+
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+ ioremap(ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+ sizeof(struct IpaHwStatsNTNInfoData_t));
+ if (!ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc ntn stats\n");
+ return;
+ }
+}
+
+/**
+ * ipa2_get_ntn_stats() - Query NTN statistics from uC
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!stats || !ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("bad parms stats=%p ntn_stats=%p\n",
+ stats,
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ TX_STATS(num_pkts_processed);
+ TX_STATS(tail_ptr_val);
+ TX_STATS(num_db_fired);
+ TX_STATS(tx_comp_ring_stats.ringFull);
+ TX_STATS(tx_comp_ring_stats.ringEmpty);
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+ TX_STATS(tx_comp_ring_stats.ringUsageLow);
+ TX_STATS(tx_comp_ring_stats.RingUtilCount);
+ TX_STATS(bam_stats.bamFifoFull);
+ TX_STATS(bam_stats.bamFifoEmpty);
+ TX_STATS(bam_stats.bamFifoUsageHigh);
+ TX_STATS(bam_stats.bamFifoUsageLow);
+ TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(num_db);
+ TX_STATS(num_unexpected_db);
+ TX_STATS(num_bam_int_handled);
+ TX_STATS(num_bam_int_in_non_running_state);
+ TX_STATS(num_qmb_int_handled);
+ TX_STATS(num_bam_int_handled_while_wait_for_bam);
+ TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+ RX_STATS(max_outstanding_pkts);
+ RX_STATS(num_pkts_processed);
+ RX_STATS(rx_ring_rp_value);
+ RX_STATS(rx_ind_ring_stats.ringFull);
+ RX_STATS(rx_ind_ring_stats.ringEmpty);
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+ RX_STATS(rx_ind_ring_stats.ringUsageLow);
+ RX_STATS(rx_ind_ring_stats.RingUtilCount);
+ RX_STATS(bam_stats.bamFifoFull);
+ RX_STATS(bam_stats.bamFifoEmpty);
+ RX_STATS(bam_stats.bamFifoUsageHigh);
+ RX_STATS(bam_stats.bamFifoUsageLow);
+ RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(num_bam_int_handled);
+ RX_STATS(num_db);
+ RX_STATS(num_unexpected_db);
+ RX_STATS(num_pkts_in_dis_uninit_state);
+ RX_STATS(num_bam_int_handled_while_not_in_bam);
+ RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
+{
+ int ret;
+
+	ret = ipa2_uc_state_check();
+	if (ret) {
+		ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
+		ipa_ctx->uc_ntn_ctx.priv = user_data;
+		return 0;
+	}
+
+	return -EEXIST;
+}
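+
+/*
+ * Illustrative sketch (example only; example_* names are hypothetical):
+ * a client either has its ready callback deferred until the uC loads,
+ * or receives -EEXIST and may proceed immediately because the uC is
+ * already up.
+ */
+#if 0	/* example only */
+static void example_ready_cb(void *user_data)
+{
+	/* start offload setup here */
+}
+
+static void example_wait_for_ipa(void)
+{
+	if (ipa2_register_ipa_ready_cb(example_ready_cb, NULL) == -EEXIST)
+		example_ready_cb(NULL);	/* uC is already loaded */
+}
+#endif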
+
+static void ipa_uc_ntn_loaded_handler(void)
+{
+ if (!ipa_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return;
+ }
+
+ if (ipa_ctx->uc_ntn_ctx.uc_ready_cb) {
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb(
+ ipa_ctx->uc_ntn_ctx.priv);
+
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb =
+ NULL;
+ ipa_ctx->uc_ntn_ctx.priv = NULL;
+ }
+}
+
+int ipa_ntn_init(void)
+{
+ struct ipa_uc_hdlrs uc_ntn_cbs = { 0 };
+
+ uc_ntn_cbs.ipa_uc_event_hdlr = ipa_uc_ntn_event_handler;
+ uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+ ipa_uc_ntn_event_log_info_handler;
+ uc_ntn_cbs.ipa_uc_loaded_hdlr =
+ ipa_uc_ntn_loaded_handler;
+
+ ipa_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+ return 0;
+}
+
+static int ipa2_uc_send_ntn_setup_pipe_cmd(
+ struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+ int ipa_ep_idx;
+ int result = 0;
+ struct ipa_mem_buffer cmd;
+ struct IpaHwNtnSetUpCmdData_t *Ntn_params;
+ struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+ if (ntn_info == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to get ep idx.\n");
+ return -EFAULT;
+ }
+
+ IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+ IPADBG("ring_base_pa = 0x%pa\n",
+ &ntn_info->ring_base_pa);
+ IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+ IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+ IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+ IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+ IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+ Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+ Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+ Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+ Ntn_params->num_buffers = ntn_info->num_buffers;
+ Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+ Ntn_params->data_buff_size = ntn_info->data_buff_size;
+ Ntn_params->ipa_pipe_number = ipa_ep_idx;
+ Ntn_params->dir = dir;
+
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result)
+ result = -EFAULT;
+
+ dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ return result;
+}
+
+/**
+ * ipa2_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ int ipa_ep_idx_ul, ipa_ep_idx_dl;
+ struct ipa_ep_context *ep_ul, *ep_dl;
+ int result = 0;
+
+ if (in == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+ if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+ IPAERR("fail to alloc EP.\n");
+ return -EFAULT;
+ }
+
+ ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->valid || ep_dl->valid) {
+ IPAERR("EP already allocated ul:%d dl:%d\n",
+ ep_ul->valid, ep_dl->valid);
+ return -EFAULT;
+ }
+
+ memset(ep_ul, 0, offsetof(struct ipa_ep_context, sys));
+ memset(ep_dl, 0, offsetof(struct ipa_ep_context, sys));
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ /* setup ul ep cfg */
+ ep_ul->valid = 1;
+ ep_ul->client = in->ul.client;
+ result = ipa_enable_data_path(ipa_ep_idx_ul);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_ul);
+ return -EFAULT;
+ }
+ ep_ul->client_notify = notify;
+ ep_ul->priv = priv;
+
+ memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+ ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+ ep_ul->cfg.hdr.hdr_len = hdr_len;
+ ep_ul->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa2_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+ IPAERR("fail to setup ul pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+ IPAERR("fail to send cmd to uc for ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa_install_dflt_flt_rules(ipa_ep_idx_ul);
+ outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+ ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPAERR("client %d (ep: %d) connected\n", in->ul.client,
+ ipa_ep_idx_ul);
+
+ /* setup dl ep cfg */
+ ep_dl->valid = 1;
+ ep_dl->client = in->dl.client;
+ result = ipa_enable_data_path(ipa_ep_idx_dl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_dl);
+ result = -EFAULT;
+ goto fail;
+ }
+
+	memset(&ep_dl->cfg, 0, sizeof(ep_dl->cfg));
+ ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+ ep_dl->cfg.hdr.hdr_len = hdr_len;
+ ep_dl->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa2_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+ IPAERR("fail to setup dl pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+ IPAERR("fail to send cmd to uc for dl pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+ ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPAERR("client %d (ep: %d) connected\n", in->dl.client,
+ ipa_ep_idx_dl);
+
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
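+
+/*
+ * Usage sketch (illustrative only; the client enums and variable names
+ * are assumptions, the actual clients depend on the platform EP map):
+ *
+ *	struct ipa_ntn_conn_in_params in = { };
+ *	struct ipa_ntn_conn_out_params out;
+ *
+ *	in.ul.client = IPA_CLIENT_ODU_PROD;
+ *	in.ul.ring_base_pa = ul_ring_pa;
+ *	in.ul.ntn_ring_size = ul_ring_sz;
+ *	in.ul.buff_pool_base_pa = ul_pool_pa;
+ *	in.ul.num_buffers = ul_num_bufs;
+ *	in.ul.data_buff_size = buf_sz;
+ *	in.ul.ntn_reg_base_ptr_pa = ul_tail_ptr_pa;
+ *	(fill in.dl the same way for the downlink client)
+ *
+ *	if (!ipa2_setup_uc_ntn_pipes(&in, my_notify_cb, my_priv,
+ *			ETH_HLEN, &out))
+ *		(peer rings the out.ul_uc_db_pa / out.dl_uc_db_pa doorbells)
+ */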
+
+/**
+ * ipa2_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ struct ipa_mem_buffer cmd;
+ struct ipa_ep_context *ep_ul, *ep_dl;
+ struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+ union IpaHwNtnCommonChCmdData_t *tear;
+ int result = 0;
+
+ IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+ IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+ ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+ ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+ IPAERR("channel bad state: ul %d dl %d\n",
+ ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+ return -EFAULT;
+ }
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ /* teardown the UL pipe */
+ cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+ tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa_disable_data_path(ipa_ep_idx_ul);
+ ipa_delete_dflt_flt_rules(ipa_ep_idx_ul);
+ memset(&ipa_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa_ep_context));
+ IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+ /* teardown the DL pipe */
+ tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa_disable_data_path(ipa_ep_idx_dl);
+ memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
+ IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+ dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
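+
+/*
+ * Usage sketch (illustrative only): ul_ep/dl_ep are the endpoint
+ * indices resolved through ipa_get_ep_mapping() at connect time:
+ *
+ *	if (ipa2_tear_down_uc_offload_pipes(ul_ep, dl_ep))
+ *		pr_err("NTN pipe teardown failed\n");
+ */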
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
new file mode 100644
index 0000000..3bec471
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
@@ -0,0 +1,514 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ * FEATURE_ENUM_VAL() - builds an enum value from the feature it
+ * corresponds to and an opcode
+ * +----------------+----------------+
+ * |     3 bits     |     5 bits     |
+ * +----------------+----------------+
+ * |   HW_FEATURE   |     OPCODE     |
+ * +----------------+----------------+
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) (((feature) << 5) | (opcode))
+#define EXTRACT_UC_FEATURE(value) ((value) >> 5)
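+
+/*
+ * For example, FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0) yields
+ * (0x4 << 5) | 0 = 0x80, and EXTRACT_UC_FEATURE(0x80) recovers
+ * IPA_HW_FEATURE_NTN (0x4).
+ */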
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa_hw_features - Values that represent the features supported in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to offload operation in IPA HW
+ */
+enum ipa_hw_features {
+ IPA_HW_FEATURE_COMMON = 0x0,
+ IPA_HW_FEATURE_MHI = 0x1,
+ IPA_HW_FEATURE_WDI = 0x3,
+ IPA_HW_FEATURE_NTN = 0x4,
+ IPA_HW_FEATURE_OFFLOAD = 0x5,
+ IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located at offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter. The parameter field can hold 32 bits
+ *		of parameters (immediate parameters) and point to a structure in
+ *		system memory (in such case the address must be accessible
+ *		to HW)
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter field can hold
+ *			32 bits of parameters (immediate parameters) and point
+ *			to a structure in system memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter field can hold 32 bits
+ *			of parameters (immediate parameters) and point to a
+ *			structure in system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the error
+ *				type.
+ * @warningCounter : The warnings counter. The counter carries information
+ *						regarding non-fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+ u8 cmdOp;
+ u8 reserved_01;
+ u16 reserved_03_02;
+ u32 cmdParams;
+ u8 responseOp;
+ u8 reserved_09;
+ u16 reserved_0B_0A;
+ u32 responseParams;
+ u8 eventOp;
+ u8 reserved_11;
+ u16 reserved_13_12;
+ u32 eventParams;
+ u32 reserved_1B_18;
+ u32 firstErrorAddress;
+ u8 hwState;
+ u8 warningCounter;
+ u16 reserved_23_22;
+ u16 interfaceVersionCommon;
+ u16 reserved_27_26;
+} __packed;
+
+/**
+ * union IpaHwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union IpaHwFeatureInfoData_t {
+ struct IpaHwFeatureInfoParams_t {
+ u32 offset:16;
+ u32 size:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * struct IpaHwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @featureInfo : Location and size of each feature within
+ *			the statistics or config structure
+ *
+ * @note Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct IpaHwEventInfoData_t {
+ u32 baseAddrOffset;
+ union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+ u32 featureMask;
+ u32 circBuffBaseAddrOffset;
+ struct IpaHwEventInfoData_t statsInfo;
+ struct IpaHwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa_uc_ntn_ctx {
+ u32 ntn_uc_stats_ofst;
+ struct IpaHwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa_hw_2_cpu_ntn_events - Values that represent HW event
+ * to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ * detected an error in NTN
+ *
+ */
+enum ipa_hw_2_cpu_ntn_events {
+ IPA_HW_2_CPU_EVENT_NTN_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error persists
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_ntn_errors {
+ IPA_HW_NTN_ERROR_NONE = 0,
+ IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_states {
+ IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_NTN_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ * num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_errors {
+ IPA_HW_NTN_CH_ERR_NONE = 0,
+ IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_NTN_TX_FSM_ERROR = 2,
+ IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+ IPA_HW_NTN_RX_FSM_ERROR = 5,
+ IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6,
+ IPA_HW_NTN_CH_ERR_RESERVED = 0xFF
+};
+
+
+/**
+ * struct IpaHwNtnSetUpCmdData_t - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ * ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ * Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ * Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of the each data buffer allocated in
+ * DDR
+ */
+struct IpaHwNtnSetUpCmdData_t {
+ u32 ring_base_pa;
+ u32 buff_pool_base_pa;
+ u16 ntn_ring_size;
+ u16 num_buffers;
+ u32 ntn_reg_base_ptr_pa;
+ u8 ipa_pipe_number;
+ u8 dir;
+ u16 data_buff_size;
+
+} __packed;
+
+/**
+ * union IpaHwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear down command
+ *
+ * @ipa_pipe_number: IPA pipe number. This could be a Tx or an Rx pipe
+ */
+union IpaHwNtnCommonChCmdData_t {
+ struct IpaHwNtnCommonChCmdParams_t {
+ u32 ipa_pipe_number :8;
+ u32 reserved :24;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union IpaHwNTNErrorEventData_t - Structure holding the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ * @ntn_error_type: type of NTN error (IPA_HW_NTN_ERRORS)
+ * @ipa_pipe_number: IPA pipe number on which the error has happened.
+ *	Applicable only if error type indicates channel error
+ * @ntn_ch_err_type: Information about the channel error (if
+ *	available)
+ */
+union IpaHwNTNErrorEventData_t {
+ struct IpaHwNTNErrorEventParams_t {
+ u32 ntn_error_type :8;
+ u32 reserved :8;
+ u32 ipa_pipe_number :8;
+ u32 ntn_ch_err_type :8;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTNRxInfoData_t - NTN structure holding the
+ * Rx pipe information
+ *
+ * @max_outstanding_pkts: Number of outstanding packets in Rx
+ *	Ring
+ * @num_pkts_processed: Number of packets processed - cumulative
+ * @rx_ring_rp_value: Read pointer last advertised to the WLAN FW
+ * @rx_ind_ring_stats: Rx indication ring usage statistics
+ * @bam_stats: BAM FIFO usage statistics
+ * @num_bam_int_handled: Number of BAM interrupts handled by FW
+ * @num_db: Number of times the doorbell was rung
+ * @num_unexpected_db: Number of unexpected doorbells
+ * @num_pkts_in_dis_uninit_state: Number of packets received while the
+ *	channel was disabled or uninitialized
+ * @num_bam_int_handled_while_not_in_bam: Number of BAM
+ *	interrupts handled by FW while not in BAM state
+ * @num_bam_int_handled_while_in_bam_state: Number of BAM
+ *	interrupts handled by FW while in BAM state
+ */
+struct NTNRxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_bam_int_handled_while_not_in_bam;
+ u32 num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * information. Ensure that this is always word aligned
+ *
+ * @num_pkts_processed: Number of packets processed - cumulative
+ * @tail_ptr_val: Latest value of doorbell written to copy engine
+ * @num_db_fired: Number of DB from uC FW to Copy engine
+ * @tx_comp_ring_stats: Tx completion ring usage statistics
+ * @bam_stats: BAM FIFO usage statistics
+ * @num_db: Number of times the doorbell was rung
+ * @num_unexpected_db: Number of unexpected doorbells
+ * @num_bam_int_handled: Number of BAM interrupts handled by FW
+ * @num_bam_int_in_non_running_state: Number of BAM interrupts
+ *	while not in Running state
+ * @num_qmb_int_handled: Number of QMB interrupts handled
+ * @num_bam_int_handled_while_wait_for_bam: Number of times the
+ *	Imm Cmd is injected due to fw_desc change
+ * @num_bam_int_handled_while_not_in_bam: Number of BAM interrupts
+ *	handled by FW while not in BAM state
+ */
+struct NTNTxInfoData_t {
+ u32 num_pkts_processed;
+ u32 tail_ptr_val;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_running_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+ u32 num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct IpaHwStatsNTNInfoData_t - Structure holding the NTN Rx and
+ * Tx channel statistics. Ensure that this is always word aligned
+ */
+struct IpaHwStatsNTNInfoData_t {
+ struct NTNRxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+ struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands - Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ *	Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ *	Offload protocol's Tx/Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is initialized
+ * but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use
+ * in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa_hw_offload_channel_states {
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF
+};
+
+
+/**
+ * enum ipa_hw_2_cpu_offload_cmd_resp_status - Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_offload_cmd_resp_status {
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+ IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+ IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+ IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+ IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+ IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+ IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - Channel set up command parameters, per
+ * offload protocol
+ * @NtnSetupCh_params: NTN channel set up parameters
+ */
+union IpaHwSetUpCmd {
+ struct IpaHwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - Offload channel set up command
+ * @protocol: offload protocol, see enum ipa_hw_features
+ * @SetupCh_params: per-protocol set up parameters
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+ u8 protocol;
+ union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ * @NtnCommonCh_params: NTN channel tear down parameters
+ */
+union IpaHwCommonChCmd {
+ union IpaHwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+ u8 protocol;
+ union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
new file mode 100644
index 0000000..abeb359
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -0,0 +1,1613 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+
+#define IPA_HOLB_TMR_DIS 0x0
+
+#define IPA_HW_INTERFACE_WDI_VERSION 0x0001
+#define IPA_HW_WDI_RX_MBOX_START_INDEX 48
+#define IPA_HW_WDI_TX_MBOX_START_INDEX 50
+#define IPA_WDI_RING_ALIGNMENT 8
+
+#define IPA_WDI_CONNECTED BIT(0)
+#define IPA_WDI_ENABLED BIT(1)
+#define IPA_WDI_RESUMED BIT(2)
+#define IPA_UC_POLL_SLEEP_USEC 100
+
+#define IPA_WDI_RX_RING_RES 0
+#define IPA_WDI_RX_RING_RP_RES 1
+#define IPA_WDI_TX_RING_RES 2
+#define IPA_WDI_CE_RING_RES 3
+#define IPA_WDI_CE_DB_RES 4
+#define IPA_WDI_MAX_RES 5
+
+struct ipa_wdi_res {
+ struct ipa_wdi_buffer_info *res;
+ unsigned int nents;
+ bool valid;
+};
+
+static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES];
+
+static void ipa_uc_wdi_loaded_handler(void);
+
+/**
+ * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to
+ * CPU.
+ * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error
+ * in WDI
+ */
+enum ipa_hw_2_cpu_wdi_events {
+ IPA_HW_2_CPU_EVENT_WDI_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+};
+
+/**
+ * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state
+ * machine.
+ * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but
+ * disabled
+ * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in
+ * suspended state
+ * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in
+ * operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_wdi_channel_states {
+ IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2,
+ IPA_HW_WDI_CHANNEL_STATE_RUNNING = 3,
+ IPA_HW_WDI_CHANNEL_STATE_ERROR = 4,
+ IPA_HW_WDI_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa_cpu_2_hw_commands - Values that represent the WDI commands from CPU
+ * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path
+ * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel
+ * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/ Rx Path
+ */
+enum ipa_cpu_2_hw_wdi_commands {
+ IPA_CPU_2_HW_CMD_WDI_TX_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+ IPA_CPU_2_HW_CMD_WDI_RX_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+ IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+ IPA_CPU_2_HW_CMD_WDI_CH_ENABLE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+ IPA_CPU_2_HW_CMD_WDI_CH_DISABLE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+ IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+ IPA_CPU_2_HW_CMD_WDI_CH_RESUME =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+ IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+};
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+ IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+ IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+ IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+ IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+ IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+ IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+ IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+ IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+ IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+ IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+ IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+ IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+ IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+ IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+ IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+ IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
+ * enum ipa_hw_wdi_errors - WDI specific error types.
+ * @IPA_HW_WDI_ERROR_NONE : No error persists
+ * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_wdi_errors {
+ IPA_HW_WDI_ERROR_NONE = 0,
+ IPA_HW_WDI_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_wdi_ch_errors - List of WDI Channel error types. This is present
+ * in the event param.
+ * @IPA_HW_WDI_CH_ERR_NONE : No error persists
+ * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx
+ * Completion ring
+ * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition
+ * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating num RE to bring
+ * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use
+ */
+enum ipa_hw_wdi_ch_errors {
+ IPA_HW_WDI_CH_ERR_NONE = 0,
+ IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_WDI_TX_FSM_ERROR = 2,
+ IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_WDI_CH_ERR_RESERVED = 0xFF
+};
+
+/**
+ * struct IpaHwSharedMemWdiMapping_t - Structure referring to the common and
+ * WDI section of 128B shared memory located at offset zero of SW Partition in
+ * IPA SRAM.
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemWdiMapping_t {
+ struct IpaHwSharedMemCommonMapping_t common;
+ u32 reserved_2B_28;
+ u32 reserved_2F_2C;
+ u32 reserved_33_30;
+ u32 reserved_37_34;
+ u32 reserved_3B_38;
+ u32 reserved_3F_3C;
+ u16 interfaceVersionWdi;
+ u16 reserved_43_42;
+ u8 wdi_tx_ch_0_state;
+ u8 wdi_rx_ch_0_state;
+ u16 reserved_47_46;
+} __packed;
+
+/**
+ * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command.
+ * @comp_ring_base_pa : This is the physical address of the base of the Tx
+ * completion ring
+ * @comp_ring_size : This is the size of the Tx completion ring
+ * @reserved_comp_ring : Reserved field for expansion of Completion ring params
+ * @ce_ring_base_pa : This is the physical address of the base of the Copy
+ * Engine Source Ring
+ * @ce_ring_size : Copy Engine Ring size
+ * @reserved_ce_ring : Reserved field for expansion of CE ring params
+ * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the
+ * IPA uC has to write into to trigger the copy engine
+ * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring
+ * and the Tx completion ring has to be at least (num_tx_buffers + 1)
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Tx path
+ * @reserved : Reserved field
+ *
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to HW
+ */
+struct IpaHwWdiTxSetUpCmdData_t {
+ u32 comp_ring_base_pa;
+ u16 comp_ring_size;
+ u16 reserved_comp_ring;
+ u32 ce_ring_base_pa;
+ u16 ce_ring_size;
+ u16 reserved_ce_ring;
+ u32 ce_ring_doorbell_pa;
+ u16 num_tx_buffers;
+ u8 ipa_pipe_number;
+ u8 reserved;
+} __packed;
+
+/**
+ * struct IpaHwWdiRxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring
+ * (containing Rx buffers)
+ * @rx_ring_size : This is the size of the Rx ring
+ * @rx_ring_rp_pa : This is the physical address of the location through which
+ * IPA uc is expected to communicate about the Read pointer into the Rx Ring
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Rx path
+ *
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to HW
+ */
+struct IpaHwWdiRxSetUpCmdData_t {
+ u32 rx_ring_base_pa;
+ u32 rx_ring_size;
+ u32 rx_ring_rp_pa;
+ u8 ipa_pipe_number;
+} __packed;
+
+/**
+ * union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
+ * @ipa_pipe_number : The IPA pipe number for which this config is passed
+ * @qmap_id : QMAP ID to be set in the metadata register
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiRxExtCfgCmdData_t {
+ struct IpaHwWdiRxExtCfgCmdParams_t {
+ u32 ipa_pipe_number:8;
+ u32 qmap_id:8;
+ u32 reserved:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiCommonChCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command.
+ * @ipa_pipe_number : The IPA pipe number. This could be Tx or an Rx pipe
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiCommonChCmdData_t {
+ struct IpaHwWdiCommonChCmdParams_t {
+ u32 ipa_pipe_number:8;
+ u32 reserved:24;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR
+ * event.
+ * @wdi_error_type : The type of WDI error (see enum ipa_hw_wdi_errors)
+ * @reserved : Reserved
+ * @ipa_pipe_number : IPA pipe number on which error has happened. Applicable
+ * only if error type indicates channel error
+ * @wdi_ch_err_type : Information about the channel error (if available)
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiErrorEventData_t {
+ struct IpaHwWdiErrorEventParams_t {
+ u32 wdi_error_type:8;
+ u32 reserved:8;
+ u32 ipa_pipe_number:8;
+ u32 wdi_ch_err_type:8;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+static void ipa_uc_wdi_event_log_info_handler(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_WDI)) == 0) {
+ IPAERR("WDI feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].
+ params.size != sizeof(struct IpaHwStatsWDIInfoData_t)) {
+ IPAERR("wdi stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct IpaHwStatsWDIInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_WDI].params.size);
+ return;
+ }
+
+ ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_WDI].params.offset;
+ IPAERR("WDI stats ofst=0x%x\n", ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+ if (ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
+ sizeof(struct IpaHwStatsWDIInfoData_t) >=
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+ ipa_ctx->smem_sz) {
+ IPAERR("uc_wdi_stats 0x%x outside SRAM\n",
+ ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+ return;
+ }
+
+ ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio =
+ ioremap(ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst,
+ sizeof(struct IpaHwStatsWDIInfoData_t));
+ if (!ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc wdi stats\n");
+ return;
+ }
+}
+
+static void ipa_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio)
+{
+ union IpaHwWdiErrorEventData_t wdi_evt;
+ struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext;
+
+ if (uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_WDI_ERROR) {
+ wdi_evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n",
+ wdi_evt.params.wdi_error_type,
+ wdi_evt.params.ipa_pipe_number,
+ wdi_evt.params.wdi_ch_err_type);
+ wdi_sram_mmio_ext =
+ (struct IpaHwSharedMemWdiMapping_t *)
+ uc_sram_mmio;
+ IPADBG("tx_ch_state=%u rx_ch_state=%u\n",
+ wdi_sram_mmio_ext->wdi_tx_ch_0_state,
+ wdi_sram_mmio_ext->wdi_rx_ch_0_state);
+ }
+}
+
+/**
+ * ipa2_get_wdi_stats() - Query WDI statistics from uc
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats.y = \
+ ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y
+#define RX_STATS(y) stats->rx_ch_stats.y = \
+ ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!stats || !ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+ IPAERR("bad parms stats=%p wdi_stats=%p\n",
+ stats,
+ ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio);
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ TX_STATS(num_pkts_processed);
+ TX_STATS(copy_engine_doorbell_value);
+ TX_STATS(num_db_fired);
+ TX_STATS(tx_comp_ring_stats.ringFull);
+ TX_STATS(tx_comp_ring_stats.ringEmpty);
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+ TX_STATS(tx_comp_ring_stats.ringUsageLow);
+ TX_STATS(tx_comp_ring_stats.RingUtilCount);
+ TX_STATS(bam_stats.bamFifoFull);
+ TX_STATS(bam_stats.bamFifoEmpty);
+ TX_STATS(bam_stats.bamFifoUsageHigh);
+ TX_STATS(bam_stats.bamFifoUsageLow);
+ TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(num_db);
+ TX_STATS(num_unexpected_db);
+ TX_STATS(num_bam_int_handled);
+ TX_STATS(num_bam_int_in_non_running_state);
+ TX_STATS(num_qmb_int_handled);
+ TX_STATS(num_bam_int_handled_while_wait_for_bam);
+
+ RX_STATS(max_outstanding_pkts);
+ RX_STATS(num_pkts_processed);
+ RX_STATS(rx_ring_rp_value);
+ RX_STATS(rx_ind_ring_stats.ringFull);
+ RX_STATS(rx_ind_ring_stats.ringEmpty);
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+ RX_STATS(rx_ind_ring_stats.ringUsageLow);
+ RX_STATS(rx_ind_ring_stats.RingUtilCount);
+ RX_STATS(bam_stats.bamFifoFull);
+ RX_STATS(bam_stats.bamFifoEmpty);
+ RX_STATS(bam_stats.bamFifoUsageHigh);
+ RX_STATS(bam_stats.bamFifoUsageLow);
+ RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(num_bam_int_handled);
+ RX_STATS(num_db);
+ RX_STATS(num_unexpected_db);
+ RX_STATS(num_pkts_in_dis_uninit_state);
+ RX_STATS(reserved1);
+ RX_STATS(reserved2);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
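+
+/*
+ * Usage sketch (illustrative only):
+ *
+ *	struct IpaHwStatsWDIInfoData_t st;
+ *
+ *	if (!ipa2_get_wdi_stats(&st))
+ *		pr_info("WDI tx pkts=%u rx pkts=%u\n",
+ *			st.tx_ch_stats.num_pkts_processed,
+ *			st.rx_ch_stats.num_pkts_processed);
+ */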
+
+int ipa_wdi_init(void)
+{
+ struct ipa_uc_hdlrs uc_wdi_cbs = { 0 };
+
+ uc_wdi_cbs.ipa_uc_event_hdlr = ipa_uc_wdi_event_handler;
+ uc_wdi_cbs.ipa_uc_event_log_info_hdlr =
+ ipa_uc_wdi_event_log_info_handler;
+ uc_wdi_cbs.ipa_uc_loaded_hdlr =
+ ipa_uc_wdi_loaded_handler;
+
+ ipa_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs);
+
+ return 0;
+}
+
+static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
+ bool device, unsigned long *iova)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
+ unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+ PAGE_SIZE);
+ int ret;
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+
+ ret = ipa_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+ true_len,
+ device ? (prot | IOMMU_DEVICE) : prot);
+ if (ret) {
+ IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+ return -EINVAL;
+ }
+
+ ipa_ctx->wdi_map_cnt++;
+ cb->next_addr = va + true_len;
+ *iova = va + pa - rounddown(pa, PAGE_SIZE);
+ return 0;
+}
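+
+/*
+ * Worked example of the rounding above (illustrative, 4K pages): for
+ * pa = 0x10001100 and len = 0x100, the mapping starts at
+ * rounddown(pa, PAGE_SIZE) = 0x10001000 with true_len = 0x1000, and
+ * the returned IOVA keeps the intra-page offset: *iova = va + 0x100.
+ */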
+
+static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
+ unsigned long *iova)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
+ unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ int ret;
+ int i;
+ struct scatterlist *sg;
+ unsigned long start_iova = va;
+ phys_addr_t phys;
+ size_t len;
+ int count = 0;
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+ if (!sgt) {
+ IPAERR("Bad parameters, scatter / gather list is NULL\n");
+ return -EINVAL;
+ }
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ phys = page_to_phys(sg_page(sg));
+ len = PAGE_ALIGN(sg->offset + sg->length);
+
+ ret = ipa_iommu_map(cb->mapping->domain, va, phys, len, prot);
+ if (ret) {
+ IPAERR("iommu map failed for pa=%pa len=%zu\n",
+ &phys, len);
+ goto bad_mapping;
+ }
+ va += len;
+ ipa_ctx->wdi_map_cnt++;
+ count++;
+ }
+ cb->next_addr = va;
+ *iova = start_iova;
+
+ return 0;
+
+bad_mapping:
+ for_each_sg(sgt->sgl, sg, count, i)
+ iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
+ sg_dma_len(sg));
+ return -EINVAL;
+}
+
+static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
+ int i;
+ int j;
+ int start;
+ int end;
+
+ if (IPA_CLIENT_IS_CONS(client)) {
+ start = IPA_WDI_TX_RING_RES;
+ end = IPA_WDI_CE_DB_RES;
+ } else {
+ start = IPA_WDI_RX_RING_RES;
+ end = IPA_WDI_RX_RING_RP_RES;
+ }
+
+ for (i = start; i <= end; i++) {
+ if (wdi_res[i].valid) {
+ for (j = 0; j < wdi_res[i].nents; j++) {
+ iommu_unmap(cb->mapping->domain,
+ wdi_res[i].res[j].iova,
+ wdi_res[i].res[j].size);
+ ipa_ctx->wdi_map_cnt--;
+ }
+ kfree(wdi_res[i].res);
+ wdi_res[i].valid = false;
+ }
+ }
+
+ if (ipa_ctx->wdi_map_cnt == 0)
+ cb->next_addr = cb->va_end;
+
+}
+
+static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa,
+ unsigned long iova, size_t len)
+{
+ IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+ &pa, iova, len);
+	wdi_res[res_idx].res = kzalloc(sizeof(*wdi_res[res_idx].res),
+		GFP_KERNEL);
+ if (!wdi_res[res_idx].res)
+ BUG();
+ wdi_res[res_idx].nents = 1;
+ wdi_res[res_idx].valid = true;
+ wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE);
+ wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE);
+ wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa,
+ PAGE_SIZE), PAGE_SIZE);
+ IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+ &wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova,
+ wdi_res[res_idx].res->size);
+}
+
+static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt,
+ unsigned long iova)
+{
+ int i;
+ struct scatterlist *sg;
+ unsigned long curr_iova = iova;
+
+ if (!sgt) {
+ IPAERR("Bad parameters, scatter / gather list is NULL\n");
+ return;
+ }
+
+	wdi_res[res_idx].res = kcalloc(sgt->nents,
+		sizeof(*wdi_res[res_idx].res), GFP_KERNEL);
+ if (!wdi_res[res_idx].res)
+ BUG();
+ wdi_res[res_idx].nents = sgt->nents;
+ wdi_res[res_idx].valid = true;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ wdi_res[res_idx].res[i].pa = page_to_phys(sg_page(sg));
+ wdi_res[res_idx].res[i].iova = curr_iova;
+ wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset +
+ sg->length);
+ IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+ &wdi_res[res_idx].res[i].pa,
+ wdi_res[res_idx].res[i].iova,
+ wdi_res[res_idx].res[i].size);
+ curr_iova += wdi_res[res_idx].res[i].size;
+ }
+}
+
+static int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en,
+ phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+ unsigned long *iova)
+{
+ /* support for SMMU on WLAN but no SMMU on IPA */
+ if (wlan_smmu_en && ipa_ctx->smmu_s1_bypass) {
+ IPAERR("Unsupported SMMU pairing\n");
+ return -EINVAL;
+ }
+
+ /* legacy: no SMMUs on either end */
+ if (!wlan_smmu_en && ipa_ctx->smmu_s1_bypass) {
+ *iova = pa;
+ return 0;
+ }
+
+ /* no SMMU on WLAN but SMMU on IPA */
+ if (!wlan_smmu_en && !ipa_ctx->smmu_s1_bypass) {
+ if (ipa_create_uc_smmu_mapping_pa(pa, len,
+ (res_idx == IPA_WDI_CE_DB_RES) ? true : false, iova)) {
+ IPAERR("Fail to create mapping res %d\n", res_idx);
+ return -EFAULT;
+ }
+ ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+ return 0;
+ }
+
+ /* SMMU on WLAN and SMMU on IPA */
+ if (wlan_smmu_en && !ipa_ctx->smmu_s1_bypass) {
+ switch (res_idx) {
+ case IPA_WDI_RX_RING_RP_RES:
+ case IPA_WDI_CE_DB_RES:
+ if (ipa_create_uc_smmu_mapping_pa(pa, len,
+ (res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+ iova)) {
+ IPAERR("Fail to create mapping res %d\n",
+ res_idx);
+ return -EFAULT;
+ }
+ ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+ break;
+ case IPA_WDI_RX_RING_RES:
+ case IPA_WDI_TX_RING_RES:
+ case IPA_WDI_CE_RING_RES:
+ if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) {
+ IPAERR("Fail to create mapping res %d\n",
+ res_idx);
+ return -EFAULT;
+ }
+ ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ return 0;
+}
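+
+/*
+ * Summary of the SMMU pairings handled above:
+ *
+ *	wlan_smmu_en	smmu_s1_bypass	action
+ *	false		true		pass PA through as the IOVA
+ *	false		false		map the contiguous PA range
+ *	true		false		map PA for DB/RP, SG table for rings
+ *	true		true		rejected as unsupported
+ */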
+
+/**
+ * ipa2_connect_wdi_pipe() - WDI client connect
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out)
+{
+ int ipa_ep_idx;
+ int result = -EFAULT;
+ struct ipa_ep_context *ep;
+ struct ipa_mem_buffer cmd;
+ struct IpaHwWdiTxSetUpCmdData_t *tx;
+ struct IpaHwWdiRxSetUpCmdData_t *rx;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ unsigned long va;
+ phys_addr_t pa;
+ u32 len;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) {
+ IPAERR("bad parm. in=%p out=%p\n", in, out);
+ if (in)
+ IPAERR("client = %d\n", in->sys.client);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+ if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT ||
+ in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+ IPAERR("alignment failure on TX\n");
+ return -EINVAL;
+ }
+ } else {
+ if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+ IPAERR("alignment failure on RX\n");
+ return -EINVAL;
+ }
+ }
+
+ result = ipa2_uc_state_check();
+ if (result)
+ return result;
+
+ ipa_ep_idx = ipa2_get_ep_mapping(in->sys.client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to alloc EP.\n");
+ goto fail;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ if (ep->valid) {
+ IPAERR("EP already allocated.\n");
+ goto fail;
+ }
+
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+ IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+ IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
+ if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+ cmd.size = sizeof(*tx);
+ IPADBG("comp_ring_base_pa=0x%pa\n",
+ &in->u.dl.comp_ring_base_pa);
+ IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
+ IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa);
+ IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
+ IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+ &in->u.dl.ce_door_bell_pa);
+ IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+ } else {
+ cmd.size = sizeof(*rx);
+ IPADBG("rx_ring_base_pa=0x%pa\n", &in->u.ul.rdy_ring_base_pa);
+ IPADBG("rx_ring_size=%d\n", in->u.ul.rdy_ring_size);
+ IPADBG("rx_ring_rp_pa=0x%pa\n", &in->u.ul.rdy_ring_rp_pa);
+ }
+
+ cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ result = -ENOMEM;
+ goto dma_alloc_fail;
+ }
+
+ if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+ tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+
+ len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+ in->u.dl.comp_ring_size;
+ IPADBG("TX ring smmu_en=%d ring_size=%d %d\n", in->smmu_enabled,
+ in->u.dl_smmu.comp_ring_size,
+ in->u.dl.comp_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+ in->smmu_enabled,
+ in->u.dl.comp_ring_base_pa,
+ &in->u.dl_smmu.comp_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping TX ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx->comp_ring_base_pa = va;
+ tx->comp_ring_size = len;
+
+ len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+ in->u.dl.ce_ring_size;
+ IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
+ in->u.dl_smmu.ce_ring_size,
+ in->u.dl.ce_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+ in->smmu_enabled,
+ in->u.dl.ce_ring_base_pa,
+ &in->u.dl_smmu.ce_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping CE ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx->ce_ring_base_pa = va;
+ tx->ce_ring_size = len;
+
+ pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+ in->u.dl.ce_door_bell_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ true,
+ &va)) {
+ IPAERR("fail to create uc mapping CE DB.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx->ce_ring_doorbell_pa = va;
+
+ tx->num_tx_buffers = in->u.dl.num_tx_buffers;
+ tx->ipa_pipe_number = ipa_ep_idx;
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+ out->uc_door_bell_pa =
+ ipa_ctx->ipa_wrapper_base +
+ IPA_REG_BASE_OFST_v2_5 +
+ IPA_UC_MAILBOX_m_n_OFFS_v2_5(
+ IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+ } else {
+ out->uc_door_bell_pa =
+ ipa_ctx->ipa_wrapper_base +
+ IPA_REG_BASE_OFST_v2_0 +
+ IPA_UC_MAILBOX_m_n_OFFS(
+ IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+ }
+ } else {
+ rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
+
+ len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+ in->u.ul.rdy_ring_size;
+ IPADBG("RX ring smmu_en=%d ring_size=%d %d\n", in->smmu_enabled,
+ in->u.ul_smmu.rdy_ring_size,
+ in->u.ul.rdy_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+ in->smmu_enabled,
+ in->u.ul.rdy_ring_base_pa,
+ &in->u.ul_smmu.rdy_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping RX ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx->rx_ring_base_pa = va;
+ rx->rx_ring_size = len;
+
+ pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+ in->u.ul.rdy_ring_rp_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping RX rng RP\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx->rx_ring_rp_pa = va;
+
+ rx->ipa_pipe_number = ipa_ep_idx;
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+ out->uc_door_bell_pa =
+ ipa_ctx->ipa_wrapper_base +
+ IPA_REG_BASE_OFST_v2_5 +
+ IPA_UC_MAILBOX_m_n_OFFS_v2_5(
+ IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+ } else {
+ out->uc_door_bell_pa =
+ ipa_ctx->ipa_wrapper_base +
+ IPA_REG_BASE_OFST_v2_0 +
+ IPA_UC_MAILBOX_m_n_OFFS(
+ IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+ }
+ }
+
+ ep->valid = 1;
+ ep->client = in->sys.client;
+ ep->keep_ipa_awake = in->sys.keep_ipa_awake;
+ result = ipa_disable_data_path(ipa_ep_idx);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx);
+ goto uc_timeout;
+ }
+ if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = true;
+ ipa2_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+ }
+
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CLIENT_IS_CONS(in->sys.client) ?
+ IPA_CPU_2_HW_CMD_WDI_TX_SET_UP :
+ IPA_CPU_2_HW_CMD_WDI_RX_SET_UP,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ ep->skip_ep_cfg = in->sys.skip_ep_cfg;
+ ep->client_notify = in->sys.notify;
+ ep->priv = in->sys.priv;
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa2_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+ IPADBG("ep configuration successful\n");
+ } else {
+ IPADBG("Skipping endpoint configuration.\n");
+ }
+
+ out->clnt_hdl = ipa_ep_idx;
+
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
+ ipa_install_dflt_flt_rules(ipa_ep_idx);
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+ dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ ep->uc_offload_state |= IPA_WDI_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
+
+ return 0;
+
+ipa_cfg_ep_fail:
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+uc_timeout:
+ ipa_release_uc_smmu_mappings(in->sys.client);
+ dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+dma_alloc_fail:
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+fail:
+ return result;
+}
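+
+/*
+ * Usage sketch (illustrative only; variable names are placeholders)
+ * for the Tx (CONS) direction without SMMU:
+ *
+ *	struct ipa_wdi_in_params in = { };
+ *	struct ipa_wdi_out_params out;
+ *
+ *	in.sys.client = IPA_CLIENT_WLAN1_CONS;
+ *	in.u.dl.comp_ring_base_pa = comp_ring_pa;
+ *	in.u.dl.comp_ring_size = comp_ring_sz;
+ *	in.u.dl.ce_ring_base_pa = ce_ring_pa;
+ *	in.u.dl.ce_ring_size = ce_ring_sz;
+ *	in.u.dl.ce_door_bell_pa = ce_db_pa;
+ *	in.u.dl.num_tx_buffers = num_bufs;
+ *
+ *	if (!ipa2_connect_wdi_pipe(&in, &out))
+ *		pr_info("hdl=%u uc_db=%pa\n", out.clnt_hdl,
+ *			&out.uc_door_bell_pa);
+ */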
+
+
+/**
+ * ipa2_disconnect_wdi_pipe() - WDI client disconnect
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t tear;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa2_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ tear.params.ipa_pipe_number = clnt_hdl;
+
+ result = ipa_uc_send_cmd(tear.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ ipa_delete_dflt_flt_rules(clnt_hdl);
+ ipa_release_uc_smmu_mappings(ep->client);
+
+ memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa2_enable_wdi_pipe() - WDI client enable
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_enable_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t enable;
+ struct ipa_ep_cfg_holb holb_cfg;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa2_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ enable.params.ipa_pipe_number = clnt_hdl;
+
+ result = ipa_uc_send_cmd(enable.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.en = IPA_HOLB_TMR_DIS;
+ holb_cfg.tmr_val = 0;
+ result = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg);
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ ep->uc_offload_state |= IPA_WDI_ENABLED;
+ IPADBG("client (ep: %d) enabled\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa2_disable_wdi_pipe() - WDI client disable
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_disable_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t disable;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ u32 prod_hdl;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa2_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ result = ipa_disable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ clnt_hdl);
+ result = -EPERM;
+ goto uc_timeout;
+ }
+
+	/*
+	 * To avoid a data stall during continuous SAP on/off, before
+	 * setting delay on the IPA Consumer pipe, remove delay and
+	 * enable HOLB on the IPA Producer pipe.
+	 */
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+
+		prod_hdl = ipa2_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+		/* WLAN1_CONS has no pipe on some HW versions */
+		if (prod_hdl != -1 && ipa_ctx->ep[prod_hdl].valid == 1) {
+ result = ipa_disable_data_path(prod_hdl);
+ if (result) {
+ IPAERR("disable data path failed\n");
+ IPAERR("res=%d clnt=%d\n",
+ result, prod_hdl);
+ result = -EPERM;
+ goto uc_timeout;
+ }
+ }
+ usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC,
+ IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC);
+ }
+
+ disable.params.ipa_pipe_number = clnt_hdl;
+
+ result = ipa_uc_send_cmd(disable.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ /* Set the delay after disabling IPA Producer pipe */
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = true;
+ ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ ep->uc_offload_state &= ~IPA_WDI_ENABLED;
+ IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa2_resume_wdi_pipe() - WDI client resume
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_resume_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t resume;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa2_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ resume.params.ipa_pipe_number = clnt_hdl;
+
+ result = ipa_uc_send_cmd(resume.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_RESUME,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
+
+ ep->uc_offload_state |= IPA_WDI_RESUMED;
+ IPADBG("client (ep: %d) resumed\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa2_suspend_wdi_pipe() - WDI client suspend
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t suspend;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa2_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+ IPA_WDI_RESUMED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+
+ suspend.params.ipa_pipe_number = clnt_hdl;
+
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ IPADBG("Post suspend event first for IPA Producer\n");
+ IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
+ result = ipa_uc_send_cmd(suspend.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+ }
+
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) failed to suspend result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+ } else {
+ ep_cfg_ctrl.ipa_ep_delay = true;
+ result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) failed to delay result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("client (ep: %d) delayed\n", clnt_hdl);
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ result = ipa_uc_send_cmd(suspend.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+ }
+
+ ipa_ctx->tag_process_before_gating = true;
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ ep->uc_offload_state &= ~IPA_WDI_RESUMED;
+ IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
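+
+/*
+ * Illustrative sketch, not part of this driver: a WDI client such as the
+ * WLAN driver is expected to walk the uc_offload_state machine in order,
+ * CONNECTED -> ENABLED -> RESUMED and back down. Assuming a handle
+ * obtained from ipa2_connect_wdi_pipe(), a hypothetical caller would do:
+ *
+ *	if (ipa2_enable_wdi_pipe(clnt_hdl))
+ *		goto fail;
+ *	if (ipa2_resume_wdi_pipe(clnt_hdl))
+ *		goto fail_disable;
+ *	(... data transfer ...)
+ *	ipa2_suspend_wdi_pipe(clnt_hdl);
+ *	ipa2_disable_wdi_pipe(clnt_hdl);
+ *	ipa2_disconnect_wdi_pipe(clnt_hdl);
+ */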
+
+int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
+{
+ int result = 0;
+ struct ipa_ep_context *ep;
+ union IpaHwWdiRxExtCfgCmdData_t qmap;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa2_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+ qmap.params.ipa_pipe_number = clnt_hdl;
+ qmap.params.qmap_id = qmap_id;
+
+ result = ipa_uc_send_cmd(qmap.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa2_uc_reg_rdyCB() - register a uC ready callback if the uC is not
+ * yet ready
+ * @inout: [in/out] input/output parameters from/to the client
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *inout)
+{
+ int result = 0;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (inout == NULL) {
+		IPAERR("bad parm. inout=%p\n", inout);
+ return -EINVAL;
+ }
+
+ result = ipa2_uc_state_check();
+ if (result) {
+ inout->is_uC_ready = false;
+ ipa_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify;
+ ipa_ctx->uc_wdi_ctx.priv = inout->priv;
+ } else {
+ inout->is_uC_ready = true;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa2_uc_dereg_rdyCB() - To de-register uC ready CB
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa2_uc_dereg_rdyCB(void)
+{
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	ipa_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
+	ipa_ctx->uc_wdi_ctx.priv = NULL;
+
+ return 0;
+}
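+
+/*
+ * Illustrative sketch, hypothetical caller: a client that depends on the
+ * uC checks readiness through the registration API and falls back to a
+ * callback (my_uc_ready_cb/my_ctx are placeholders) when the uC has not
+ * loaded yet:
+ *
+ *	struct ipa_wdi_uc_ready_params param = {
+ *		.notify = my_uc_ready_cb,
+ *		.priv = my_ctx,
+ *	};
+ *
+ *	ipa2_uc_reg_rdyCB(&param);
+ *	if (param.is_uC_ready)
+ *		start_setup(my_ctx);	(otherwise wait for the callback)
+ */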
+
+/**
+ * ipa2_uc_wdi_get_dbpa() - retrieve the doorbell physical address of
+ * the WLAN pipes
+ * @param: [in/out] input/output parameters from/to the client
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_uc_wdi_get_dbpa(struct ipa_wdi_db_params *param)
+{
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (param == NULL || param->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. param=%p\n", param);
+ if (param)
+ IPAERR("client = %d\n", param->client);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(param->client)) {
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+ param->uc_door_bell_pa =
+ ipa_ctx->ipa_wrapper_base +
+ IPA_REG_BASE_OFST_v2_5 +
+ IPA_UC_MAILBOX_m_n_OFFS_v2_5(
+ IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+ } else {
+ param->uc_door_bell_pa =
+ ipa_ctx->ipa_wrapper_base +
+ IPA_REG_BASE_OFST_v2_0 +
+ IPA_UC_MAILBOX_m_n_OFFS(
+ IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+ }
+ } else {
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+ param->uc_door_bell_pa =
+ ipa_ctx->ipa_wrapper_base +
+ IPA_REG_BASE_OFST_v2_5 +
+ IPA_UC_MAILBOX_m_n_OFFS_v2_5(
+ IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+ } else {
+ param->uc_door_bell_pa =
+ ipa_ctx->ipa_wrapper_base +
+ IPA_REG_BASE_OFST_v2_0 +
+ IPA_UC_MAILBOX_m_n_OFFS(
+ IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+ }
+ }
+
+ return 0;
+}
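+
+/*
+ * Note on the mailbox arithmetic above: the uC mailboxes form an m x n
+ * register grid, so a linear start index is split into the coordinates
+ * (index / 32, index % 32). For example, assuming a start index of 48,
+ * the doorbell would land in mailbox (1, 16).
+ */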
+
+static void ipa_uc_wdi_loaded_handler(void)
+{
+ if (!ipa_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return;
+ }
+
+ if (ipa_ctx->uc_wdi_ctx.uc_ready_cb) {
+ ipa_ctx->uc_wdi_ctx.uc_ready_cb(
+ ipa_ctx->uc_wdi_ctx.priv);
+
+		ipa_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
+ ipa_ctx->uc_wdi_ctx.priv = NULL;
+ }
+}
+
+int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
+ int i;
+ int ret = 0;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ if (!info) {
+ IPAERR("info = %p\n", info);
+ return -EINVAL;
+ }
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_buffers; i++) {
+ IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+ &info[i].pa, info[i].iova, info[i].size);
+ info[i].result = ipa_iommu_map(cb->iommu,
+ rounddown(info[i].iova, PAGE_SIZE),
+ rounddown(info[i].pa, PAGE_SIZE),
+ roundup(info[i].size + info[i].pa -
+ rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE),
+ prot);
+ }
+
+ return ret;
+}
+
+int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
+ int i;
+ int ret = 0;
+
+ if (!info) {
+ IPAERR("info = %p\n", info);
+ return -EINVAL;
+ }
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_buffers; i++) {
+ IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+ &info[i].pa, info[i].iova, info[i].size);
+ info[i].result = iommu_unmap(cb->iommu,
+ rounddown(info[i].iova, PAGE_SIZE),
+ roundup(info[i].size + info[i].pa -
+ rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE));
+ }
+
+ return ret;
+}
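+
+/*
+ * Note on the alignment arithmetic above: the IOMMU is programmed in
+ * whole pages, so the IOVA/PA are rounded down to a page boundary and
+ * the size is grown by the bytes the rounding dropped. For example,
+ * with 4K pages, pa = 0x10000804 and size = 0x1000 are mapped as
+ * pa 0x10000000 with size roundup(0x1000 + 0x804, 0x1000) = 0x2000.
+ */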
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
new file mode 100644
index 0000000..e2b7fe1
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -0,0 +1,5185 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h> /* gen_pool_alloc() */
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
+#define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL)
+#define IPA_V2_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
+#define IPA_V2_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
+#define IPA_V2_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
+#define IPA_V1_MAX_HOLB_TMR_VAL (512 - 1)
+#define IPA_V2_0_MAX_HOLB_TMR_VAL (65536 - 1)
+#define IPA_V2_5_MAX_HOLB_TMR_VAL (4294967296 - 1)
+#define IPA_V2_6L_MAX_HOLB_TMR_VAL IPA_V2_5_MAX_HOLB_TMR_VAL
+
+#define IPA_V2_0_BW_THRESHOLD_TURBO_MBPS (1000)
+#define IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS (600)
+
+/* Max pipes + ICs for TAG process */
+#define IPA_TAG_MAX_DESC (IPA_MAX_NUM_PIPES + 6)
+
+#define IPA_TAG_SLEEP_MIN_USEC (1000)
+#define IPA_TAG_SLEEP_MAX_USEC (2000)
+#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
+#define IPA_BCR_REG_VAL (0x001FFF7F)
+#define IPA_AGGR_GRAN_MIN (1)
+#define IPA_AGGR_GRAN_MAX (32)
+#define IPA_EOT_COAL_GRAN_MIN (1)
+#define IPA_EOT_COAL_GRAN_MAX (16)
+#define MSEC 1000
+#define MIN_RX_POLL_TIME 1
+#define MAX_RX_POLL_TIME 5
+#define UPPER_CUTOFF 50
+#define LOWER_CUTOFF 10
+
+#define IPA_DEFAULT_SYS_YELLOW_WM 32
+
+#define IPA_AGGR_BYTE_LIMIT (\
+ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
+ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
+#define IPA_AGGR_PKT_LIMIT (\
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)
+
+static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+ IPA_OFFSET_MEQ32_1, -1 };
+static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+ IPA_OFFSET_MEQ128_1, -1 };
+static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+ IPA_IHL_OFFSET_RANGE16_1, -1 };
+static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+ IPA_IHL_OFFSET_MEQ32_1, -1 };
+#define IPA_1_1 (0)
+#define IPA_2_0 (1)
+#define IPA_2_6L (2)
+
+#define INVALID_EP_MAPPING_INDEX (-1)
+
+static const int ep_mapping[3][IPA_CLIENT_MAX] = {
+ [IPA_1_1][IPA_CLIENT_HSIC1_PROD] = 19,
+ [IPA_1_1][IPA_CLIENT_WLAN1_PROD] = -1,
+ [IPA_1_1][IPA_CLIENT_HSIC2_PROD] = 12,
+ [IPA_1_1][IPA_CLIENT_USB2_PROD] = 12,
+ [IPA_1_1][IPA_CLIENT_HSIC3_PROD] = 13,
+ [IPA_1_1][IPA_CLIENT_USB3_PROD] = 13,
+ [IPA_1_1][IPA_CLIENT_HSIC4_PROD] = 0,
+ [IPA_1_1][IPA_CLIENT_USB4_PROD] = 0,
+ [IPA_1_1][IPA_CLIENT_HSIC5_PROD] = -1,
+ [IPA_1_1][IPA_CLIENT_USB_PROD] = 11,
+ [IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = 15,
+ [IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD] = 8,
+ [IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD] = 6,
+ [IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD] = 2,
+ [IPA_1_1][IPA_CLIENT_APPS_CMD_PROD] = 1,
+ [IPA_1_1][IPA_CLIENT_ODU_PROD] = -1,
+ [IPA_1_1][IPA_CLIENT_MHI_PROD] = -1,
+ [IPA_1_1][IPA_CLIENT_Q6_LAN_PROD] = 5,
+ [IPA_1_1][IPA_CLIENT_Q6_WAN_PROD] = -1,
+ [IPA_1_1][IPA_CLIENT_Q6_CMD_PROD] = -1,
+
+ [IPA_1_1][IPA_CLIENT_HSIC1_CONS] = 14,
+ [IPA_1_1][IPA_CLIENT_WLAN1_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_HSIC2_CONS] = 16,
+ [IPA_1_1][IPA_CLIENT_USB2_CONS] = 16,
+ [IPA_1_1][IPA_CLIENT_WLAN2_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_HSIC3_CONS] = 17,
+ [IPA_1_1][IPA_CLIENT_USB3_CONS] = 17,
+ [IPA_1_1][IPA_CLIENT_WLAN3_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_HSIC4_CONS] = 18,
+ [IPA_1_1][IPA_CLIENT_USB4_CONS] = 18,
+ [IPA_1_1][IPA_CLIENT_WLAN4_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_HSIC5_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_USB_CONS] = 10,
+ [IPA_1_1][IPA_CLIENT_USB_DPL_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS] = 9,
+ [IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS] = 7,
+ [IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS] = 3,
+ [IPA_1_1][IPA_CLIENT_APPS_LAN_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_APPS_WAN_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_ODU_EMB_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_ODU_TETH_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_MHI_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_Q6_LAN_CONS] = 4,
+ [IPA_1_1][IPA_CLIENT_Q6_WAN_CONS] = -1,
+
+
+ [IPA_2_0][IPA_CLIENT_HSIC1_PROD] = 12,
+ [IPA_2_0][IPA_CLIENT_WLAN1_PROD] = 18,
+ [IPA_2_0][IPA_CLIENT_HSIC2_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_USB2_PROD] = 12,
+ [IPA_2_0][IPA_CLIENT_HSIC3_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_USB3_PROD] = 13,
+ [IPA_2_0][IPA_CLIENT_HSIC4_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_USB4_PROD] = 0,
+ [IPA_2_0][IPA_CLIENT_HSIC5_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_USB_PROD] = 11,
+ [IPA_2_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_A2_EMBEDDED_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_A2_TETHERED_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD] = 4,
+ [IPA_2_0][IPA_CLIENT_APPS_CMD_PROD] = 3,
+ [IPA_2_0][IPA_CLIENT_ODU_PROD] = 12,
+ [IPA_2_0][IPA_CLIENT_MHI_PROD] = 18,
+ [IPA_2_0][IPA_CLIENT_Q6_LAN_PROD] = 6,
+ [IPA_2_0][IPA_CLIENT_Q6_WAN_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_Q6_CMD_PROD] = 7,
+ [IPA_2_0][IPA_CLIENT_Q6_DECOMP_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_Q6_DECOMP2_PROD] = -1,
+ [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
+ = 12,
+ [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
+ = 19,
+ /* Only for test purpose */
+ [IPA_2_0][IPA_CLIENT_TEST_PROD] = 19,
+ [IPA_2_0][IPA_CLIENT_TEST1_PROD] = 19,
+ [IPA_2_0][IPA_CLIENT_TEST2_PROD] = 12,
+ [IPA_2_0][IPA_CLIENT_TEST3_PROD] = 11,
+ [IPA_2_0][IPA_CLIENT_TEST4_PROD] = 0,
+
+ [IPA_2_0][IPA_CLIENT_HSIC1_CONS] = 13,
+ [IPA_2_0][IPA_CLIENT_WLAN1_CONS] = 17,
+ [IPA_2_0][IPA_CLIENT_HSIC2_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_USB2_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_WLAN2_CONS] = 16,
+ [IPA_2_0][IPA_CLIENT_HSIC3_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_USB3_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_WLAN3_CONS] = 14,
+ [IPA_2_0][IPA_CLIENT_HSIC4_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_USB4_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_WLAN4_CONS] = 19,
+ [IPA_2_0][IPA_CLIENT_HSIC5_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_USB_CONS] = 15,
+ [IPA_2_0][IPA_CLIENT_USB_DPL_CONS] = 0,
+ [IPA_2_0][IPA_CLIENT_A2_EMBEDDED_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_A2_TETHERED_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_A5_LAN_WAN_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_APPS_LAN_CONS] = 2,
+ [IPA_2_0][IPA_CLIENT_APPS_WAN_CONS] = 5,
+ [IPA_2_0][IPA_CLIENT_ODU_EMB_CONS] = 13,
+ [IPA_2_0][IPA_CLIENT_ODU_TETH_CONS] = 1,
+ [IPA_2_0][IPA_CLIENT_MHI_CONS] = 17,
+ [IPA_2_0][IPA_CLIENT_Q6_LAN_CONS] = 8,
+ [IPA_2_0][IPA_CLIENT_Q6_WAN_CONS] = 9,
+ [IPA_2_0][IPA_CLIENT_Q6_DUN_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_Q6_DECOMP_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_Q6_DECOMP2_CONS] = -1,
+ [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
+ = 13,
+ [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
+ = 16,
+ [IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
+ = 10,
+ /* Only for test purpose */
+ [IPA_2_0][IPA_CLIENT_TEST_CONS] = 1,
+ [IPA_2_0][IPA_CLIENT_TEST1_CONS] = 1,
+ [IPA_2_0][IPA_CLIENT_TEST2_CONS] = 16,
+ [IPA_2_0][IPA_CLIENT_TEST3_CONS] = 13,
+ [IPA_2_0][IPA_CLIENT_TEST4_CONS] = 15,
+
+
+ [IPA_2_6L][IPA_CLIENT_HSIC1_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_WLAN1_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_HSIC2_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_USB2_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_HSIC3_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_USB3_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_HSIC4_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_USB4_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_HSIC5_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_USB_PROD] = 1,
+ [IPA_2_6L][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_A2_EMBEDDED_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_A2_TETHERED_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD] = 4,
+ [IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD] = 3,
+ [IPA_2_6L][IPA_CLIENT_ODU_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_MHI_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD] = 6,
+ [IPA_2_6L][IPA_CLIENT_Q6_WAN_PROD] = -1,
+ [IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD] = 7,
+ [IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD] = 11,
+ [IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD] = 13,
+ [IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
+ = -1,
+ [IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
+ = -1,
+ /* Only for test purpose */
+ [IPA_2_6L][IPA_CLIENT_TEST_PROD] = 11,
+ [IPA_2_6L][IPA_CLIENT_TEST1_PROD] = 11,
+ [IPA_2_6L][IPA_CLIENT_TEST2_PROD] = 12,
+ [IPA_2_6L][IPA_CLIENT_TEST3_PROD] = 13,
+ [IPA_2_6L][IPA_CLIENT_TEST4_PROD] = 14,
+
+ [IPA_2_6L][IPA_CLIENT_HSIC1_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_WLAN1_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_HSIC2_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_USB2_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_WLAN2_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_HSIC3_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_USB3_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_WLAN3_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_HSIC4_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_USB4_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_WLAN4_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_HSIC5_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_USB_CONS] = 0,
+ [IPA_2_6L][IPA_CLIENT_USB_DPL_CONS] = 10,
+ [IPA_2_6L][IPA_CLIENT_A2_EMBEDDED_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_A2_TETHERED_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_A5_LAN_WAN_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS] = 2,
+ [IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS] = 5,
+ [IPA_2_6L][IPA_CLIENT_ODU_EMB_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_ODU_TETH_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_MHI_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS] = 8,
+ [IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS] = 9,
+ [IPA_2_6L][IPA_CLIENT_Q6_DUN_CONS] = -1,
+ [IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS] = 12,
+ [IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS] = 14,
+ [IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
+ = -1,
+ [IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
+ = -1,
+ [IPA_2_6L][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
+ = -1,
+ /* Only for test purpose */
+ [IPA_2_6L][IPA_CLIENT_TEST_CONS] = 15,
+ [IPA_2_6L][IPA_CLIENT_TEST1_CONS] = 15,
+ [IPA_2_6L][IPA_CLIENT_TEST2_CONS] = 0,
+ [IPA_2_6L][IPA_CLIENT_TEST3_CONS] = 1,
+ [IPA_2_6L][IPA_CLIENT_TEST4_CONS] = 10,
+};
+
+static struct msm_bus_vectors ipa_init_vectors_v1_1[] = {
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+ {
+ .src = MSM_BUS_MASTER_BAM_DMA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+ {
+ .src = MSM_BUS_MASTER_BAM_DMA,
+ .dst = MSM_BUS_SLAVE_OCIMEM,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors ipa_init_vectors_v2_0[] = {
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_OCIMEM,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors ipa_max_perf_vectors_v1_1[] = {
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 50000000,
+ .ib = 960000000,
+ },
+ {
+ .src = MSM_BUS_MASTER_BAM_DMA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 50000000,
+ .ib = 960000000,
+ },
+ {
+ .src = MSM_BUS_MASTER_BAM_DMA,
+ .dst = MSM_BUS_SLAVE_OCIMEM,
+ .ab = 50000000,
+ .ib = 960000000,
+ },
+};
+
+static struct msm_bus_vectors ipa_nominal_perf_vectors_v2_0[] = {
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 100000000,
+ .ib = 1300000000,
+ },
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_OCIMEM,
+ .ab = 100000000,
+ .ib = 1300000000,
+ },
+};
+
+static struct msm_bus_paths ipa_usecases_v1_1[] = {
+ {
+ ARRAY_SIZE(ipa_init_vectors_v1_1),
+ ipa_init_vectors_v1_1,
+ },
+ {
+ ARRAY_SIZE(ipa_max_perf_vectors_v1_1),
+ ipa_max_perf_vectors_v1_1,
+ },
+};
+
+static struct msm_bus_paths ipa_usecases_v2_0[] = {
+ {
+ ARRAY_SIZE(ipa_init_vectors_v2_0),
+ ipa_init_vectors_v2_0,
+ },
+ {
+ ARRAY_SIZE(ipa_nominal_perf_vectors_v2_0),
+ ipa_nominal_perf_vectors_v2_0,
+ },
+};
+
+static struct msm_bus_scale_pdata ipa_bus_client_pdata_v1_1 = {
+ ipa_usecases_v1_1,
+ ARRAY_SIZE(ipa_usecases_v1_1),
+ .name = "ipa",
+};
+
+static struct msm_bus_scale_pdata ipa_bus_client_pdata_v2_0 = {
+ ipa_usecases_v2_0,
+ ARRAY_SIZE(ipa_usecases_v2_0),
+ .name = "ipa",
+};
+
+void ipa_active_clients_lock(void)
+{
+ unsigned long flags;
+
+ mutex_lock(&ipa_ctx->ipa_active_clients.mutex);
+ spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
+ ipa_ctx->ipa_active_clients.mutex_locked = true;
+ spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
+}
+
+int ipa_active_clients_trylock(unsigned long *flags)
+{
+ spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, *flags);
+ if (ipa_ctx->ipa_active_clients.mutex_locked) {
+ spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock,
+ *flags);
+ return 0;
+ }
+
+ return 1;
+}
+
+void ipa_active_clients_trylock_unlock(unsigned long *flags)
+{
+ spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, *flags);
+}
+
+void ipa_active_clients_unlock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
+ ipa_ctx->ipa_active_clients.mutex_locked = false;
+ spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
+ mutex_unlock(&ipa_ctx->ipa_active_clients.mutex);
+}
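+
+/*
+ * Illustrative sketch of the locking protocol above (hypothetical
+ * atomic-context caller): paths that must not sleep use the trylock
+ * pair and bail out whenever a sleeping path holds the mutex:
+ *
+ *	unsigned long flags;
+ *
+ *	if (ipa_active_clients_trylock(&flags) == 0)
+ *		return -EPERM;	(mutex held, caller retries later)
+ *	(... touch ipa_ctx->ipa_active_clients.cnt ...)
+ *	ipa_active_clients_trylock_unlock(&flags);
+ */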
+
+/**
+ * ipa_get_clients_from_rm_resource() - get IPA clients which are related to an
+ * IPA_RM resource
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ * @clients: [OUT] Empty array which will contain the list of clients. The
+ * caller must initialize this array.
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_get_clients_from_rm_resource(
+ enum ipa_rm_resource_name resource,
+ struct ipa_client_names *clients)
+{
+ int i = 0;
+
+ if (resource < 0 ||
+ resource >= IPA_RM_RESOURCE_MAX ||
+ !clients) {
+ IPAERR("Bad parameters\n");
+ return -EINVAL;
+ }
+
+ switch (resource) {
+ case IPA_RM_RESOURCE_USB_CONS:
+ clients->names[i++] = IPA_CLIENT_USB_CONS;
+ break;
+ case IPA_RM_RESOURCE_HSIC_CONS:
+ clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
+ break;
+ case IPA_RM_RESOURCE_WLAN_CONS:
+ clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
+ clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
+ clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
+ clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
+ break;
+ case IPA_RM_RESOURCE_MHI_CONS:
+ clients->names[i++] = IPA_CLIENT_MHI_CONS;
+ break;
+ case IPA_RM_RESOURCE_USB_PROD:
+ clients->names[i++] = IPA_CLIENT_USB_PROD;
+ break;
+ case IPA_RM_RESOURCE_HSIC_PROD:
+ clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
+ break;
+ case IPA_RM_RESOURCE_MHI_PROD:
+ clients->names[i++] = IPA_CLIENT_MHI_PROD;
+ break;
+ default:
+ break;
+ }
+ clients->length = i;
+
+ return 0;
+}
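+
+/*
+ * For example, passing IPA_RM_RESOURCE_WLAN_CONS fills clients->names
+ * with the four WLAN consumer clients (WLAN1_CONS..WLAN4_CONS) and sets
+ * clients->length to 4; an unhandled resource yields length 0.
+ */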
+
+/**
+ * ipa_should_pipe_be_suspended() - returns true when the client's pipe should
+ * be suspended during a power save scenario. False otherwise.
+ *
+ * @client: [IN] IPA client
+ */
+bool ipa_should_pipe_be_suspended(enum ipa_client_type client)
+{
+ struct ipa_ep_context *ep;
+ int ipa_ep_idx;
+
+ ipa_ep_idx = ipa2_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ WARN_ON(1);
+ return false;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ if (ep->keep_ipa_awake)
+ return false;
+
+ if (client == IPA_CLIENT_USB_CONS ||
+ client == IPA_CLIENT_MHI_CONS ||
+ client == IPA_CLIENT_HSIC1_CONS ||
+ client == IPA_CLIENT_WLAN1_CONS ||
+ client == IPA_CLIENT_WLAN2_CONS ||
+ client == IPA_CLIENT_WLAN3_CONS ||
+ client == IPA_CLIENT_WLAN4_CONS)
+ return true;
+
+ return false;
+}
+
+/**
+ * ipa2_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa2_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+ struct ipa_client_names clients;
+ int res;
+ int index;
+ struct ipa_ep_cfg_ctrl suspend;
+ enum ipa_client_type client;
+ int ipa_ep_idx;
+ bool pipe_suspended = false;
+
+ memset(&clients, 0, sizeof(clients));
+ res = ipa_get_clients_from_rm_resource(resource, &clients);
+ if (res) {
+ IPAERR("Bad params.\n");
+ return res;
+ }
+
+ for (index = 0; index < clients.length; index++) {
+ client = clients.names[index];
+ ipa_ep_idx = ipa2_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ res = -EINVAL;
+ continue;
+ }
+ ipa_ctx->resume_on_connect[client] = false;
+ if (ipa_ctx->ep[ipa_ep_idx].client == client &&
+ ipa_should_pipe_be_suspended(client)) {
+ if (ipa_ctx->ep[ipa_ep_idx].valid) {
+ /* suspend endpoint */
+ memset(&suspend, 0, sizeof(suspend));
+ suspend.ipa_ep_suspend = true;
+ ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+ pipe_suspended = true;
+ }
+ }
+ }
+ /* Sleep ~1 msec */
+ if (pipe_suspended)
+ usleep_range(1000, 2000);
+
+ /* before gating IPA clocks do TAG process */
+ ipa_ctx->tag_process_before_gating = true;
+ IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
+
+ return 0;
+}
+
+/**
+ * ipa2_suspend_resource_no_block() - suspend client endpoints related to the
+ * IPA_RM resource and decrement active clients counter. This function is
+ * guaranteed to avoid sleeping.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+ int res;
+ struct ipa_client_names clients;
+ int index;
+ enum ipa_client_type client;
+ struct ipa_ep_cfg_ctrl suspend;
+ int ipa_ep_idx;
+ unsigned long flags;
+ struct ipa_active_client_logging_info log_info;
+
+ if (ipa_active_clients_trylock(&flags) == 0)
+ return -EPERM;
+ if (ipa_ctx->ipa_active_clients.cnt == 1) {
+ res = -EPERM;
+ goto bail;
+ }
+
+ memset(&clients, 0, sizeof(clients));
+ res = ipa_get_clients_from_rm_resource(resource, &clients);
+ if (res) {
+ IPAERR("ipa_get_clients_from_rm_resource() failed, name = %d.\n"
+ , resource);
+ goto bail;
+ }
+
+ for (index = 0; index < clients.length; index++) {
+ client = clients.names[index];
+ ipa_ep_idx = ipa2_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ res = -EINVAL;
+ continue;
+ }
+ ipa_ctx->resume_on_connect[client] = false;
+ if (ipa_ctx->ep[ipa_ep_idx].client == client &&
+ ipa_should_pipe_be_suspended(client)) {
+ if (ipa_ctx->ep[ipa_ep_idx].valid) {
+ /* suspend endpoint */
+ memset(&suspend, 0, sizeof(suspend));
+ suspend.ipa_ep_suspend = true;
+ ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+ }
+ }
+ }
+
+ if (res == 0) {
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+ ipa_rm_resource_str(resource));
+ ipa2_active_clients_log_dec(&log_info, true);
+ ipa_ctx->ipa_active_clients.cnt--;
+ IPADBG("active clients = %d\n",
+ ipa_ctx->ipa_active_clients.cnt);
+ }
+bail:
+ ipa_active_clients_trylock_unlock(&flags);
+
+ return res;
+}
+
+/**
+ * ipa2_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa2_resume_resource(enum ipa_rm_resource_name resource)
+{
+ struct ipa_client_names clients;
+ int res;
+ int index;
+ struct ipa_ep_cfg_ctrl suspend;
+ enum ipa_client_type client;
+ int ipa_ep_idx;
+
+ memset(&clients, 0, sizeof(clients));
+ res = ipa_get_clients_from_rm_resource(resource, &clients);
+ if (res) {
+ IPAERR("ipa_get_clients_from_rm_resource() failed.\n");
+ return res;
+ }
+
+ for (index = 0; index < clients.length; index++) {
+ client = clients.names[index];
+ ipa_ep_idx = ipa2_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ res = -EINVAL;
+ continue;
+ }
+ /*
+		 * The related ep will be resumed on connect
+ * while its resource is granted
+ */
+ ipa_ctx->resume_on_connect[client] = true;
+ IPADBG("%d will be resumed on connect.\n", client);
+ if (ipa_ctx->ep[ipa_ep_idx].client == client &&
+ ipa_should_pipe_be_suspended(client)) {
+ spin_lock(&ipa_ctx->disconnect_lock);
+ if (ipa_ctx->ep[ipa_ep_idx].valid &&
+ !ipa_ctx->ep[ipa_ep_idx].disconnect_in_progress) {
+ memset(&suspend, 0, sizeof(suspend));
+ suspend.ipa_ep_suspend = false;
+ ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+ }
+ spin_unlock(&ipa_ctx->disconnect_lock);
+ }
+ }
+
+ return res;
+}
+
+/*
+ * Read how much SRAM is available for SW use.
+ * In case of IPA v2.0 this also supplies the offset from which we can
+ * start writing.
+ */
+void _ipa_sram_settings_read_v1_1(void)
+{
+ ipa_ctx->smem_restricted_bytes = 0;
+ ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v1_1);
+ ipa_ctx->smem_reqd_sz = IPA_MEM_v1_RAM_END_OFST;
+ ipa_ctx->hdr_tbl_lcl = 1;
+ ipa_ctx->ip4_rt_tbl_lcl = 0;
+ ipa_ctx->ip6_rt_tbl_lcl = 0;
+ ipa_ctx->ip4_flt_tbl_lcl = 1;
+ ipa_ctx->ip6_flt_tbl_lcl = 1;
+}
+
+void _ipa_sram_settings_read_v2_0(void)
+{
+ ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
+ ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
+ ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+ ipa_ctx->hdr_tbl_lcl = 0;
+ ipa_ctx->ip4_rt_tbl_lcl = 0;
+ ipa_ctx->ip6_rt_tbl_lcl = 0;
+ ipa_ctx->ip4_flt_tbl_lcl = 0;
+ ipa_ctx->ip6_flt_tbl_lcl = 0;
+}
+
+void _ipa_sram_settings_read_v2_5(void)
+{
+ ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
+ ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
+ ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+ ipa_ctx->hdr_tbl_lcl = 0;
+ ipa_ctx->hdr_proc_ctx_tbl_lcl = 1;
+
+ /*
+	 * When the proc ctx table is located in internal memory,
+	 * the modem entries reside first.
+ */
+ if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
+ ipa_ctx->hdr_proc_ctx_tbl.start_offset =
+ IPA_MEM_PART(modem_hdr_proc_ctx_size);
+ }
+ ipa_ctx->ip4_rt_tbl_lcl = 0;
+ ipa_ctx->ip6_rt_tbl_lcl = 0;
+ ipa_ctx->ip4_flt_tbl_lcl = 0;
+ ipa_ctx->ip6_flt_tbl_lcl = 0;
+}
+
+void _ipa_sram_settings_read_v2_6L(void)
+{
+ ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
+ ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
+ ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+ ipa_ctx->hdr_tbl_lcl = 0;
+ ipa_ctx->ip4_rt_tbl_lcl = 0;
+ ipa_ctx->ip6_rt_tbl_lcl = 0;
+ ipa_ctx->ip4_flt_tbl_lcl = 0;
+ ipa_ctx->ip6_flt_tbl_lcl = 0;
+}
+
+void _ipa_cfg_route_v1_1(struct ipa_route *route)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
+ IPA_ROUTE_ROUTE_DIS_SHFT,
+ IPA_ROUTE_ROUTE_DIS_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
+ IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
+}
+
+void _ipa_cfg_route_v2_0(struct ipa_route *route)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
+ IPA_ROUTE_ROUTE_DIS_SHFT,
+ IPA_ROUTE_ROUTE_DIS_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
+ IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, route->route_frag_def_pipe,
+ IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
+}
+
+/**
+ * ipa_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_route(struct ipa_route *route)
+{
+ IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
+ route->route_dis,
+ route->route_def_pipe,
+ route->route_def_hdr_table);
+ IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
+ route->route_def_hdr_ofst,
+ route->route_frag_def_pipe);
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ ipa_ctx->ctrl->ipa_cfg_route(route);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
+ * ipa_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_filter(u32 disable)
+{
+ u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst,
+ IPA_SETFIELD(!disable,
+ IPA_FILTER_FILTER_EN_SHFT,
+ IPA_FILTER_FILTER_EN_BMSK));
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
+ * ipa_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_init_hw(void)
+{
+ u32 ipa_version = 0;
+
+ /* do soft reset of IPA */
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);
+
+ /* enable IPA */
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);
+
+ /* Read IPA version and make sure we have access to the registers */
+ ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
+ if (ipa_version == 0)
+ return -EFAULT;
+
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+		/* set IPA_BCR to IPA_BCR_REG_VAL to enable the new IPA behavior */
+ ipa_write_reg(ipa_ctx->mmio, IPA_BCR_OFST, IPA_BCR_REG_VAL);
+ }
+ return 0;
+}
+
+/**
+ * ipa2_get_ep_mapping() - provide endpoint mapping
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa2_get_ep_mapping(enum ipa_client_type client)
+{
+ u8 hw_type_index = IPA_1_1;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return INVALID_EP_MAPPING_INDEX;
+ }
+
+ if (client >= IPA_CLIENT_MAX || client < 0) {
+ IPAERR("Bad client number! client =%d\n", client);
+ return INVALID_EP_MAPPING_INDEX;
+ }
+
+ switch (ipa_ctx->ipa_hw_type) {
+ case IPA_HW_v2_0:
+ case IPA_HW_v2_5:
+ hw_type_index = IPA_2_0;
+ break;
+ case IPA_HW_v2_6L:
+ hw_type_index = IPA_2_6L;
+ break;
+ default:
+ hw_type_index = IPA_1_1;
+ break;
+ }
+
+ return ep_mapping[hw_type_index][client];
+}
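+
+/*
+ * For example, per the ep_mapping table above, on IPA v2.0 HW
+ * ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) returns pipe 4,
+ * while on IPA v1.1 the same client maps to pipe 2. Clients marked -1
+ * (INVALID_EP_MAPPING_INDEX) are not supported on that HW version.
+ */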
+
+/**
+ * ipa2_set_client() - set the IPACM client mapping of a pipe
+ * @index: IPA end-point index
+ * @client: IPACM client type
+ * @uplink: true when the pipe carries uplink traffic
+ *
+ * Return value: none
+ */
+void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink)
+{
+ if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
+ IPAERR("Bad client number! client =%d\n", client);
+ } else if (index >= IPA_MAX_NUM_PIPES || index < 0) {
+ IPAERR("Bad pipe index! index =%d\n", index);
+ } else {
+ ipa_ctx->ipacm_client[index].client_enum = client;
+ ipa_ctx->ipacm_client[index].uplink = uplink;
+ }
+}
+
+/**
+ * ipa2_get_client() - provide the IPACM client mapping of a pipe
+ * @pipe_idx: IPA end-point index
+ *
+ * Return value: IPACM client type, or IPACM_CLIENT_MAX on a bad index
+ */
+enum ipacm_client_enum ipa2_get_client(int pipe_idx)
+{
+ if (pipe_idx >= IPA_MAX_NUM_PIPES || pipe_idx < 0) {
+ IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
+ return IPACM_CLIENT_MAX;
+ } else {
+ return ipa_ctx->ipacm_client[pipe_idx].client_enum;
+ }
+}
+
+/**
+ * ipa2_get_client_uplink() - report whether a pipe carries uplink traffic
+ * @pipe_idx: IPA end-point index
+ *
+ * Return value: true when the pipe is an uplink pipe
+ */
+bool ipa2_get_client_uplink(int pipe_idx)
+{
+	if (pipe_idx >= IPA_MAX_NUM_PIPES || pipe_idx < 0)
+		return false;
+
+	return ipa_ctx->ipacm_client[pipe_idx].uplink;
+}
+
+/**
+ * ipa2_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
+ * the supplied pipe index.
+ *
+ * @pipe_idx:
+ *
+ * Return value: IPA_RM resource related to the pipe, negative on error or
+ * when no resource was found.
+ */
+enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx)
+{
+ int i;
+ int j;
+ enum ipa_client_type client;
+ struct ipa_client_names clients;
+ bool found = false;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
+ IPAERR("Bad pipe index!\n");
+ return -EINVAL;
+ }
+
+ client = ipa_ctx->ep[pipe_idx].client;
+
+ for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+ memset(&clients, 0, sizeof(clients));
+ ipa_get_clients_from_rm_resource(i, &clients);
+ for (j = 0; j < clients.length; j++) {
+ if (clients.names[j] == client) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+
+ if (!found)
+ return -EFAULT;
+
+ return i;
+}
+
+/**
+ * ipa2_get_client_mapping() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client mapping
+ */
+enum ipa_client_type ipa2_get_client_mapping(int pipe_idx)
+{
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
+ IPAERR("Bad pipe index!\n");
+ return -EINVAL;
+ }
+
+ return ipa_ctx->ep[pipe_idx].client;
+}
+
+void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset,
+ const uint8_t mac_addr_mask[ETH_ALEN],
+ const uint8_t mac_addr[ETH_ALEN])
+{
+ *buf = ipa_write_8(hdr_mac_addr_offset, *buf);
+
+ /* MAC addr mask copied as little endian each 4 bytes */
+ *buf = ipa_write_8(mac_addr_mask[3], *buf);
+ *buf = ipa_write_8(mac_addr_mask[2], *buf);
+ *buf = ipa_write_8(mac_addr_mask[1], *buf);
+ *buf = ipa_write_8(mac_addr_mask[0], *buf);
+ *buf = ipa_write_16(0, *buf);
+ *buf = ipa_write_8(mac_addr_mask[5], *buf);
+ *buf = ipa_write_8(mac_addr_mask[4], *buf);
+ *buf = ipa_write_32(0, *buf);
+ *buf = ipa_write_32(0, *buf);
+
+ /* MAC addr copied as little endian each 4 bytes */
+ *buf = ipa_write_8(mac_addr[3], *buf);
+ *buf = ipa_write_8(mac_addr[2], *buf);
+ *buf = ipa_write_8(mac_addr[1], *buf);
+ *buf = ipa_write_8(mac_addr[0], *buf);
+ *buf = ipa_write_16(0, *buf);
+ *buf = ipa_write_8(mac_addr[5], *buf);
+ *buf = ipa_write_8(mac_addr[4], *buf);
+ *buf = ipa_write_32(0, *buf);
+ *buf = ipa_write_32(0, *buf);
+ *buf = ipa_pad_to_32(*buf);
+}
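+
+/*
+ * Byte layout produced above: each 4-byte word of the meq128 image is
+ * written little endian, so a MAC address aa:bb:cc:dd:ee:ff is emitted
+ * as the byte stream dd cc bb aa 00 00 ff ee followed by eight zero
+ * bytes, once for the mask and once for the value.
+ */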
+
+/**
+ * ipa_generate_hw_rule() - generate HW rule
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer
+ * @en_rule: [out] bitmap of enabled rule equations
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+
+ if (ip == IPA_IP_v4) {
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+ attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
+ IPA_FLT_FLOW_LABEL) {
+ IPAERR("v6 attrib's specified for v4 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ *en_rule |= IPA_TOS_EQ;
+ *buf = ipa_write_8(attrib->u.v4.tos, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* 0 => offset of TOS in v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32((attrib->tos_mask << 16), *buf);
+ *buf = ipa_write_32((attrib->tos_value << 16), *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ *buf = ipa_write_8(attrib->u.v4.protocol, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* 12 => offset of src ip in v4 header */
+ *buf = ipa_write_8(12, *buf);
+ *buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
+ *buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* 16 => offset of dst ip in v4 header */
+ *buf = ipa_write_8(16, *buf);
+ *buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
+ *buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* -2 => offset of ether type in L2 hdr */
+ *buf = ipa_write_8((u8)-2, *buf);
+ *buf = ipa_write_16(0, *buf);
+ *buf = ipa_write_16(htons(attrib->ether_type), *buf);
+ *buf = ipa_write_16(0, *buf);
+ *buf = ipa_write_16(htons(attrib->ether_type), *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port_hi, *buf);
+ *buf = ipa_write_16(attrib->src_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v4 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port_hi, *buf);
+ *buf = ipa_write_16(attrib->dst_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of type after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->type, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 1 => offset of code after v4 header */
+ *buf = ipa_write_8(1, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->code, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of SPI after v4 header FIXME */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFFFFFFFF, *buf);
+ *buf = ipa_write_32(attrib->spi, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v4 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_generate_mac_addr_hw_rule(
+ buf,
+ -14,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_generate_mac_addr_hw_rule(
+ buf,
+ -8,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_generate_mac_addr_hw_rule(
+ buf,
+ -22,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_generate_mac_addr_hw_rule(
+ buf,
+ -16,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ *buf = ipa_write_8(0, *buf); /* offset, reserved */
+ *buf = ipa_write_32(attrib->meta_data_mask, *buf);
+ *buf = ipa_write_32(attrib->meta_data, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_IS_FRAG;
+ *buf = ipa_pad_to_32(*buf);
+ }
+ } else if (ip == IPA_IP_v6) {
+
+ /* v6 code below assumes no extension headers TODO: fix this */
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_TOS ||
+ attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ IPAERR("v4 attrib's specified for v6 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ *buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* -2 => offset of ether type in L2 hdr */
+ *buf = ipa_write_8((u8)-2, *buf);
+ *buf = ipa_write_16(0, *buf);
+ *buf = ipa_write_16(htons(attrib->ether_type), *buf);
+ *buf = ipa_write_16(0, *buf);
+ *buf = ipa_write_16(htons(attrib->ether_type), *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of type after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->type, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 1 => offset of code after v6 header */
+ *buf = ipa_write_8(1, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->code, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of SPI after v6 header FIXME */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFFFFFFFF, *buf);
+ *buf = ipa_write_32(attrib->spi, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v6 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port_hi, *buf);
+ *buf = ipa_write_16(attrib->src_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v6 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port_hi, *buf);
+ *buf = ipa_write_16(attrib->dst_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ /* 8 => offset of src ip in v6 header */
+ *buf = ipa_write_8(8, *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ /* 24 => offset of dst ip in v6 header */
+ *buf = ipa_write_8(24, *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ *en_rule |= IPA_FLT_TC;
+ *buf = ipa_write_8(attrib->u.v6.tc, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ /* 0 => offset of TOS in v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32((attrib->tos_mask << 20), *buf);
+ *buf = ipa_write_32(0, *buf);
+ *buf = ipa_write_32(0, *buf);
+ *buf = ipa_write_32(0, *buf);
+
+ *buf = ipa_write_32((attrib->tos_value << 20), *buf);
+ *buf = ipa_write_32(0, *buf);
+ *buf = ipa_write_32(0, *buf);
+ *buf = ipa_write_32(0, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_generate_mac_addr_hw_rule(
+ buf,
+ -14,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_generate_mac_addr_hw_rule(
+ buf,
+ -8,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_generate_mac_addr_hw_rule(
+ buf,
+ -22,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_generate_mac_addr_hw_rule(
+ buf,
+ -16,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ *en_rule |= IPA_FLT_FLOW_LABEL;
+ /* FIXME FL is only 20 bits */
+ *buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ *buf = ipa_write_8(0, *buf); /* offset, reserved */
+ *buf = ipa_write_32(attrib->meta_data_mask, *buf);
+ *buf = ipa_write_32(attrib->meta_data, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_IS_FRAG;
+ *buf = ipa_pad_to_32(*buf);
+ }
+ } else {
+ IPAERR("unsupported ip %d\n", ip);
+ return -EPERM;
+ }
+
+ /*
+ * default "rule" means no attributes set -> map to
+ * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+ */
+ if (attrib->attrib_mask == 0) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ *buf = ipa_write_8(0, *buf); /* offset */
+ *buf = ipa_write_32(0, *buf); /* mask */
+ *buf = ipa_write_32(0, *buf); /* val */
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ return 0;
+}
+
+void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
+ u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN],
+ const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
+{
+ eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
+ eq_atrb->offset_meq_128[ofst_meq128].mask[0] = mac_addr_mask[3];
+ eq_atrb->offset_meq_128[ofst_meq128].mask[1] = mac_addr_mask[2];
+ eq_atrb->offset_meq_128[ofst_meq128].mask[2] = mac_addr_mask[1];
+ eq_atrb->offset_meq_128[ofst_meq128].mask[3] = mac_addr_mask[0];
+ eq_atrb->offset_meq_128[ofst_meq128].mask[4] = 0;
+ eq_atrb->offset_meq_128[ofst_meq128].mask[5] = 0;
+ eq_atrb->offset_meq_128[ofst_meq128].mask[6] = mac_addr_mask[5];
+ eq_atrb->offset_meq_128[ofst_meq128].mask[7] = mac_addr_mask[4];
+ memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 8);
+ eq_atrb->offset_meq_128[ofst_meq128].value[0] = mac_addr[3];
+ eq_atrb->offset_meq_128[ofst_meq128].value[1] = mac_addr[2];
+ eq_atrb->offset_meq_128[ofst_meq128].value[2] = mac_addr[1];
+ eq_atrb->offset_meq_128[ofst_meq128].value[3] = mac_addr[0];
+ eq_atrb->offset_meq_128[ofst_meq128].value[4] = 0;
+ eq_atrb->offset_meq_128[ofst_meq128].value[5] = 0;
+ eq_atrb->offset_meq_128[ofst_meq128].value[6] = mac_addr[5];
+ eq_atrb->offset_meq_128[ofst_meq128].value[7] = mac_addr[4];
+ memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 8);
+}
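+
+/*
+ * Example: the byte layout produced by ipa_generate_flt_mac_addr_eq().
+ * The mask/value bytes are swizzled within 32-bit words (consistent with
+ * a little-endian word comparison in HW). For a hypothetical address
+ * aa:bb:cc:dd:ee:ff with a full ff:ff:ff:ff:ff:ff mask:
+ *
+ *	value[0..7] = dd cc bb aa 00 00 ff ee
+ *	mask[0..7]  = ff ff ff ff 00 00 ff ff
+ *
+ * and bytes 8..15 of both arrays are zeroed.
+ */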
+
+int ipa_generate_flt_eq(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+ u16 eq_bitmap = 0;
+ u16 *en_rule = &eq_bitmap;
+
+ if (ip == IPA_IP_v4) {
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+ attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
+ IPA_FLT_FLOW_LABEL) {
+ IPAERR("v6 attrib's specified for v4 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ *en_rule |= IPA_TOS_EQ;
+ eq_atrb->tos_eq_present = 1;
+ eq_atrb->tos_eq = attrib->u.v4.tos;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ attrib->tos_mask << 16;
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ attrib->tos_value << 16;
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ eq_atrb->protocol_eq_present = 1;
+ eq_atrb->protocol_eq = attrib->u.v4.protocol;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ attrib->u.v4.src_addr_mask;
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ attrib->u.v4.src_addr;
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ attrib->u.v4.dst_addr_mask;
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ attrib->u.v4.dst_addr;
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->type;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->code;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ 0xFFFFFFFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->spi;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ eq_atrb->metadata_meq32_present = 1;
+ eq_atrb->metadata_meq32.offset = 0;
+ eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+ eq_atrb->metadata_meq32.value = attrib->meta_data;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_IS_FRAG;
+ eq_atrb->ipv4_frag_eq_present = 1;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ htons(attrib->ether_type);
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ htons(attrib->ether_type);
+ ofst_meq32++;
+ }
+ } else if (ip == IPA_IP_v6) {
+
+ /* v6 code below assumes no extension headers TODO: fix this */
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_TOS ||
+ attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ IPAERR("v4 attrib's specified for v6 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ eq_atrb->protocol_eq_present = 1;
+ eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->type;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->code;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ 0xFFFFFFFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->spi;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+ = attrib->u.v6.src_addr_mask[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+ = attrib->u.v6.src_addr_mask[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+ = attrib->u.v6.src_addr_mask[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+ = attrib->u.v6.src_addr_mask[3];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+ = attrib->u.v6.src_addr[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+ = attrib->u.v6.src_addr[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+ = attrib->u.v6.src_addr[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+ 12) = attrib->u.v6.src_addr[3];
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+ = attrib->u.v6.dst_addr_mask[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+ = attrib->u.v6.dst_addr_mask[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+ = attrib->u.v6.dst_addr_mask[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+ = attrib->u.v6.dst_addr_mask[3];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+ = attrib->u.v6.dst_addr[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+ = attrib->u.v6.dst_addr[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+ = attrib->u.v6.dst_addr[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+ 12) = attrib->u.v6.dst_addr[3];
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ *en_rule |= IPA_FLT_TC;
+ eq_atrb->tc_eq_present = 1;
+ eq_atrb->tc_eq = attrib->u.v6.tc;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+ = attrib->tos_mask << 20;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+ = 0;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+ = 0;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+ = 0;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+ = attrib->tos_value << 20;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+ = 0;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+ = 0;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+ 12) = 0;
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ *en_rule |= IPA_FLT_FLOW_LABEL;
+ eq_atrb->fl_eq_present = 1;
+ eq_atrb->fl_eq = attrib->u.v6.flow_label;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ eq_atrb->metadata_meq32_present = 1;
+ eq_atrb->metadata_meq32.offset = 0;
+ eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+ eq_atrb->metadata_meq32.value = attrib->meta_data;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_IS_FRAG;
+ eq_atrb->ipv4_frag_eq_present = 1;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ htons(attrib->ether_type);
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ htons(attrib->ether_type);
+ ofst_meq32++;
+ }
+
+ } else {
+ IPAERR("unsupported ip %d\n", ip);
+ return -EPERM;
+ }
+
+ /*
+ * default "rule" means no attributes set -> map to
+ * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+ */
+ if (attrib->attrib_mask == 0) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+ eq_atrb->offset_meq_32[ofst_meq32].mask = 0;
+ eq_atrb->offset_meq_32[ofst_meq32].value = 0;
+ ofst_meq32++;
+ }
+
+ eq_atrb->rule_eq_bitmap = *en_rule;
+ eq_atrb->num_offset_meq_32 = ofst_meq32;
+ eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+ eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+ eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+ return 0;
+}
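+
+/*
+ * Example: a minimal sketch of translating a rule attribute block into
+ * equation form via ipa_generate_flt_eq(). The attribute values are
+ * hypothetical and error handling is elided:
+ *
+ *	struct ipa_rule_attrib attrib;
+ *	struct ipa_ipfltri_rule_eq eq;
+ *
+ *	memset(&attrib, 0, sizeof(attrib));
+ *	memset(&eq, 0, sizeof(eq));
+ *	attrib.attrib_mask = IPA_FLT_DST_PORT_RANGE;
+ *	attrib.dst_port_lo = 5000;
+ *	attrib.dst_port_hi = 5010;
+ *	if (ipa_generate_flt_eq(IPA_IP_v4, &attrib, &eq))
+ *		return -EPERM;
+ *
+ * On success, eq.rule_eq_bitmap has the bit for the first ihl_ofst_rng16
+ * equation set and eq.num_ihl_offset_range_16 == 1.
+ */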
+
+/**
+ * ipa2_cfg_ep - IPA end-point configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * This includes the NAT, header, mode, aggregation and route settings, and is
+ * a one-shot API to fully configure the IPA end-point.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ int result = -EINVAL;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ result = ipa2_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+ if (result)
+ return result;
+
+ result = ipa2_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
+ if (result)
+ return result;
+
+ result = ipa2_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+ if (result)
+ return result;
+
+ result = ipa2_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
+ if (result)
+ return result;
+
+ if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
+ result = ipa2_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+ if (result)
+ return result;
+
+ result = ipa2_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+ if (result)
+ return result;
+
+ result = ipa2_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+ if (result)
+ return result;
+
+ result = ipa2_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
+ if (result)
+ return result;
+ } else {
+ result = ipa2_cfg_ep_metadata_mask(clnt_hdl,
+ &ipa_ep_cfg->metadata_mask);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
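+
+/*
+ * Example: a minimal sketch of a one-shot endpoint configuration through
+ * ipa2_cfg_ep(). `hdl` is assumed to be a valid producer client handle
+ * from a prior pipe setup; unset fields keep their zeroed defaults:
+ *
+ *	struct ipa_ep_cfg cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.hdr.hdr_len = ETH_HLEN;
+ *	cfg.mode.mode = IPA_BASIC;
+ *	if (ipa2_cfg_ep(hdl, &cfg))
+ *		IPAERR("ep %u config failed\n", hdl);
+ */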
+
+const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en)
+{
+ switch (nat_en) {
+ case (IPA_BYPASS_NAT):
+ return "NAT disabled";
+ case (IPA_SRC_NAT):
+ return "Source NAT";
+ case (IPA_DST_NAT):
+ return "Dst NAT";
+ }
+
+ return "undefined";
+}
+
+void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl,
+ const struct ipa_ep_cfg_nat *ep_nat)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
+ IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
+ IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_NAT_N_OFST_v1_1(clnt_hdl),
+ reg_val);
+}
+
+void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl,
+ const struct ipa_ep_cfg_nat *ep_nat)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
+ IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
+ IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_NAT_N_OFST_v2_0(clnt_hdl),
+ reg_val);
+}
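+
+/*
+ * Note: IPA_SETFIELD_IN_REG() follows the usual shift-and-mask pattern,
+ * roughly:
+ *
+ *	reg_val |= (field_val << shft) & bmsk;
+ *
+ * so each version-specific helper above simply accumulates fields into
+ * reg_val and writes the result to its register offset.
+ */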
+
+/**
+ * ipa2_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_nat:	[in] IPA end-point NAT configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
+{
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl,
+ ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, nat_en=%d(%s)\n",
+ clnt_hdl,
+ ep_nat->nat_en,
+ ipa_get_nat_en_str(ep_nat->nat_en));
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+static void _ipa_cfg_ep_status_v1_1(u32 clnt_hdl,
+ const struct ipa_ep_cfg_status *ep_status)
+{
+ IPADBG("Not supported for version 1.1\n");
+}
+
+static void _ipa_cfg_ep_status_v2_0(u32 clnt_hdl,
+ const struct ipa_ep_cfg_status *ep_status)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_status->status_en,
+ IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_status->status_ep,
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_STATUS_n_OFST(clnt_hdl),
+ reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_status() - IPA end-point status configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_status:	[in] IPA end-point status configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status)
+{
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl,
+ ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, status_en=%d status_ep=%d\n",
+ clnt_hdl,
+ ep_status->status_en,
+ ep_status->status_ep);
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].status = *ep_status;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+static void _ipa_cfg_ep_cfg_v1_1(u32 clnt_hdl,
+ const struct ipa_ep_cfg_cfg *cfg)
+{
+ IPADBG("Not supported for version 1.1\n");
+}
+
+static void _ipa_cfg_ep_cfg_v2_0(u32 clnt_hdl,
+ const struct ipa_ep_cfg_cfg *cfg)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, cfg->frag_offload_en,
+ IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
+ IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
+ IPA_SETFIELD_IN_REG(reg_val, cfg->cs_offload_en,
+ IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
+ IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
+ IPA_SETFIELD_IN_REG(reg_val, cfg->cs_metadata_hdr_offset,
+ IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
+ IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CFG_n_OFST(clnt_hdl),
+ reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_cfg() - IPA end-point cfg configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @cfg:	[in] IPA end-point cfg configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
+{
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl,
+ ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d\n",
+ clnt_hdl,
+ cfg->frag_offload_en,
+ cfg->cs_offload_en,
+ cfg->cs_metadata_hdr_offset);
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+static void _ipa_cfg_ep_metadata_mask_v1_1(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask *metadata_mask)
+{
+ IPADBG("Not supported for version 1.1\n");
+}
+
+static void _ipa_cfg_ep_metadata_mask_v2_0(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask *metadata_mask)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, metadata_mask->metadata_mask,
+ IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
+ IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(clnt_hdl),
+ reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @metadata_mask: [in] IPA end-point meta-data mask configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask *metadata_mask)
+{
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl,
+ ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, metadata_mask=0x%x\n",
+ clnt_hdl,
+ metadata_mask->metadata_mask);
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number,
+ const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+ u32 val = 0;
+
+ val = IPA_SETFIELD(ep_hdr->hdr_len,
+ IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK) |
+ IPA_SETFIELD(ep_hdr->hdr_ofst_metadata_valid,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK) |
+ IPA_SETFIELD(ep_hdr->hdr_ofst_metadata,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK) |
+ IPA_SETFIELD(ep_hdr->hdr_additional_const_len,
+ IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK) |
+ IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size_valid,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK) |
+ IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK) |
+ IPA_SETFIELD(ep_hdr->hdr_a5_mux,
+ IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe_number), val);
+}
+
+void _ipa_cfg_ep_hdr_v2_0(u32 pipe_number,
+ const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_metadata_reg_valid,
+ IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2,
+ IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_remove_additional,
+ IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
+ IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_a5_mux,
+ IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size_valid,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_additional_const_len,
+ IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata_valid,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_len,
+ IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe_number), reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_hdr() - IPA end-point header configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_hdr:	[in] IPA end-point header configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+ IPADBG("pipe=%d remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
+ clnt_hdl,
+ ep_hdr->hdr_remove_additional,
+ ep_hdr->hdr_a5_mux,
+ ep_hdr->hdr_ofst_pkt_size);
+
+ IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
+ ep_hdr->hdr_ofst_pkt_size_valid,
+ ep_hdr->hdr_additional_const_len);
+
+ IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x",
+ ep_hdr->hdr_ofst_metadata,
+ ep_hdr->hdr_ofst_metadata_valid,
+ ep_hdr->hdr_len);
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ /* copy over EP cfg */
+ ep->cfg.hdr = *ep_hdr;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+static int _ipa_cfg_ep_hdr_ext_v1_1(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ep_hdr)
+{
+ IPADBG("Not supported for version 1.1\n");
+ return 0;
+}
+
+static int _ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 reg_val)
+{
+ u8 hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_offset,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_payload_len_inc_padding,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_valid,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, hdr_endianness,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(clnt_hdl), reg_val);
+
+ return 0;
+}
+
+static int _ipa_cfg_ep_hdr_ext_v2_0(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0);
+
+ return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
+}
+
+static int _ipa_cfg_ep_hdr_ext_v2_5(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
+
+ return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
+}
+
+static int _ipa_cfg_ep_hdr_ext_v2_6L(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
+
+ return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_hdr_ext() - IPA end-point extended header configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_hdr_ext: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
+ clnt_hdl,
+ ep_hdr_ext->hdr_pad_to_alignment);
+
+ IPADBG("hdr_total_len_or_pad_offset=%d\n",
+ ep_hdr_ext->hdr_total_len_or_pad_offset);
+
+ IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
+ ep_hdr_ext->hdr_payload_len_inc_padding,
+ ep_hdr_ext->hdr_total_len_or_pad);
+
+ IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
+ ep_hdr_ext->hdr_total_len_or_pad_valid,
+ ep_hdr_ext->hdr_little_endian);
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ /* copy over EP cfg */
+ ep->cfg.hdr_ext = *ep_hdr_ext;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+/**
+ * ipa2_cfg_ep_ctrl() - IPA end-point control configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_ctrl:	[in] IPA end-point control (suspend/delay) params
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+ u32 reg_val = 0;
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ep_ctrl == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
+ clnt_hdl,
+ ep_ctrl->ipa_ep_suspend,
+ ep_ctrl->ipa_ep_delay);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_suspend,
+ IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT,
+ IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_delay,
+ IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
+ IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_CTRL_N_OFST(clnt_hdl), reg_val);
+
+ return 0;
+}
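+
+/*
+ * Example: a minimal sketch of suspending a pipe via ipa2_cfg_ep_ctrl(),
+ * assuming `hdl` is a valid client handle:
+ *
+ *	struct ipa_ep_cfg_ctrl ctrl;
+ *
+ *	memset(&ctrl, 0, sizeof(ctrl));
+ *	ctrl.ipa_ep_suspend = true;
+ *	ipa2_cfg_ep_ctrl(hdl, &ctrl);
+ */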
+
+/**
+ * ipa_cfg_aggr_cntr_granularity() - configure the granularity of the AGGR
+ * timers
+ * @aggr_granularity: [in] granularity of the AGGR timers, in units of
+ * 1/32 msec
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity)
+{
+ u32 reg_val = 0;
+
+ if (aggr_granularity <= IPA_AGGR_GRAN_MIN ||
+ aggr_granularity > IPA_AGGR_GRAN_MAX) {
+ IPAERR("bad param, aggr_granularity = %d\n",
+ aggr_granularity);
+ return -EINVAL;
+ }
+ IPADBG("aggr_granularity=%d\n", aggr_granularity);
+
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
+ reg_val = (reg_val & ~IPA_COUNTER_CFG_AGGR_GRAN_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, aggr_granularity - 1,
+ IPA_COUNTER_CFG_AGGR_GRAN_SHFT,
+ IPA_COUNTER_CFG_AGGR_GRAN_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_COUNTER_CFG_OFST, reg_val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_aggr_cntr_granularity);
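+
+/*
+ * Example: the AGGR_GRAN field is programmed with (granularity - 1), so,
+ * assuming 32 lies within [IPA_AGGR_GRAN_MIN, IPA_AGGR_GRAN_MAX], a call
+ * ipa_cfg_aggr_cntr_granularity(32) writes 31 to the field and yields an
+ * AGGR timer tick of 32 * (1/32) msec = 1 msec.
+ */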
+
+/**
+ * ipa_cfg_eot_coal_cntr_granularity() - configure the granularity of the
+ * EOT_COAL timers
+ * @eot_coal_granularity: granularity of the EOT_COAL timers, in units of
+ * 1/32 msec
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity)
+{
+ u32 reg_val = 0;
+
+ if (eot_coal_granularity <= IPA_EOT_COAL_GRAN_MIN ||
+ eot_coal_granularity > IPA_EOT_COAL_GRAN_MAX) {
+ IPAERR("bad parm, eot_coal_granularity = %d\n",
+ eot_coal_granularity);
+ return -EINVAL;
+ }
+ IPADBG("eot_coal_granularity=%d\n", eot_coal_granularity);
+
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
+ reg_val = (reg_val & ~IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, eot_coal_granularity - 1,
+ IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT,
+ IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_COUNTER_CFG_OFST, reg_val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_eot_coal_cntr_granularity);
+
+const char *ipa_get_mode_type_str(enum ipa_mode_type mode)
+{
+ switch (mode) {
+ case (IPA_BASIC):
+ return "Basic";
+ case (IPA_ENABLE_FRAMING_HDLC):
+ return "HDLC framing";
+ case (IPA_ENABLE_DEFRAMING_HDLC):
+ return "HDLC de-framing";
+ case (IPA_DMA):
+ return "DMA";
+ }
+
+ return "undefined";
+}
+
+void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number,
+ const struct ipa_ep_cfg_mode *ep_mode)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
+ IPA_ENDP_INIT_MODE_N_MODE_SHFT,
+ IPA_ENDP_INIT_MODE_N_MODE_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
+ IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1,
+ IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe_number), reg_val);
+}
+
+void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 dst_pipe_number,
+ const struct ipa_ep_cfg_mode *ep_mode)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
+ IPA_ENDP_INIT_MODE_N_MODE_SHFT,
+ IPA_ENDP_INIT_MODE_N_MODE_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
+ IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0,
+ IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe_number), reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_mode:	[in] IPA end-point mode configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
+{
+ int ep;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ ep = ipa2_get_ep_mapping(ep_mode->dst);
+ if (ep == -1 && ep_mode->mode == IPA_DMA) {
+ IPAERR("dst %d does not exist\n", ep_mode->dst);
+ return -EINVAL;
+ }
+
+ WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
+
+ if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
+ ep = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+
+ IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d",
+ clnt_hdl,
+ ep_mode->mode,
+ ipa_get_mode_type_str(ep_mode->mode),
+ ep_mode->dst);
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
+ ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl,
+ ipa_ctx->ep[clnt_hdl].dst_pipe_index,
+ ep_mode);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
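+
+/*
+ * Example: a minimal sketch of putting a producer pipe into DMA mode
+ * toward a consumer via ipa2_cfg_ep_mode(), assuming `hdl` is a valid
+ * producer client handle:
+ *
+ *	struct ipa_ep_cfg_mode mode;
+ *
+ *	memset(&mode, 0, sizeof(mode));
+ *	mode.mode = IPA_DMA;
+ *	mode.dst = IPA_CLIENT_USB_CONS;
+ *	if (ipa2_cfg_ep_mode(hdl, &mode))
+ *		IPAERR("DMA mode config failed on ep %u\n", hdl);
+ */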
+
+const char *get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
+{
+ switch (aggr_en) {
+ case (IPA_BYPASS_AGGR):
+ return "no aggregation";
+ case (IPA_ENABLE_AGGR):
+ return "aggregation enabled";
+ case (IPA_ENABLE_DEAGGR):
+ return "de-aggregation enabled";
+ }
+
+ return "undefined";
+}
+
+const char *get_aggr_type_str(enum ipa_aggr_type aggr_type)
+{
+ switch (aggr_type) {
+ case (IPA_MBIM_16):
+ return "MBIM_16";
+ case (IPA_HDLC):
+ return "HDLC";
+ case (IPA_TLP):
+ return "TLP";
+ case (IPA_RNDIS):
+ return "RNDIS";
+ case (IPA_GENERIC):
+ return "GENERIC";
+ case (IPA_QCMAP):
+ return "QCMAP";
+ }
+ return "undefined";
+}
+
+void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number,
+ const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
+ IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
+ IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
+ IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
+ IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
+ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
+ IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe_number), reg_val);
+}
+
+void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number,
+ const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
+ IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
+ IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
+ IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
+ IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
+ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
+ IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_pkt_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_sw_eof_active,
+ IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe_number), reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_aggr:	[in] IPA end-point aggregation configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
+ clnt_hdl,
+ ep_aggr->aggr_en,
+ get_aggr_enable_str(ep_aggr->aggr_en),
+ ep_aggr->aggr,
+ get_aggr_type_str(ep_aggr->aggr),
+ ep_aggr->aggr_byte_limit,
+ ep_aggr->aggr_time_limit);
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
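+
+/*
+ * Example: a minimal sketch of enabling generic aggregation on a consumer
+ * pipe via ipa2_cfg_ep_aggr(). The limit values are hypothetical; check
+ * the ipa_ep_cfg_aggr documentation for the exact units:
+ *
+ *	struct ipa_ep_cfg_aggr aggr;
+ *
+ *	memset(&aggr, 0, sizeof(aggr));
+ *	aggr.aggr_en = IPA_ENABLE_AGGR;
+ *	aggr.aggr = IPA_GENERIC;
+ *	aggr.aggr_byte_limit = 16;
+ *	aggr.aggr_time_limit = 1;
+ *	if (ipa2_cfg_ep_aggr(hdl, &aggr))
+ *		IPAERR("aggr config failed on ep %u\n", hdl);
+ */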
+
+void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index)
+{
+	u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
+ IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
+ IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe_index),
+ reg_val);
+}
+
+void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index)
+{
+	u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
+ IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
+ IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe_index),
+ reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_route:	[in] IPA end-point routing configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
+{
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("ROUTE does not apply to IPA out EP %d\n",
+ clnt_hdl);
+ return -EINVAL;
+ }
+
+ /*
+ * if DMA mode was configured previously for this EP, return with
+ * success
+ */
+ if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+ IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
+ clnt_hdl);
+ return 0;
+ }
+
+ if (ep_route->rt_tbl_hdl)
+ IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+ IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
+ clnt_hdl,
+ ep_route->rt_tbl_hdl);
+
+ /* always use "default" routing table when programming EP ROUTE reg */
+ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
+ ipa_ctx->ep[clnt_hdl].rt_tbl_idx =
+ IPA_MEM_PART(v4_apps_rt_index_lo);
+ else
+ ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl,
+ ipa_ctx->ep[clnt_hdl].rt_tbl_idx);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+void _ipa_cfg_ep_holb_v1_1(u32 pipe_number,
+ const struct ipa_ep_cfg_holb *ep_holb)
+{
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe_number),
+ ep_holb->en);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe_number),
+ (u16)ep_holb->tmr_val);
+}
+
+void _ipa_cfg_ep_holb_v2_0(u32 pipe_number,
+ const struct ipa_ep_cfg_holb *ep_holb)
+{
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
+ ep_holb->en);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
+ (u16)ep_holb->tmr_val);
+}
+
+void _ipa_cfg_ep_holb_v2_5(u32 pipe_number,
+ const struct ipa_ep_cfg_holb *ep_holb)
+{
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
+ ep_holb->en);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
+ ep_holb->tmr_val);
+}
+
+void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number,
+ const struct ipa_ep_cfg_holb *ep_holb)
+{
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
+ ep_holb->en);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
+ ep_holb->tmr_val);
+}
+
+/**
+ * ipa2_cfg_ep_holb() - IPA end-point holb configuration
+ *
+ * If an IPA producer pipe is full, the IPA HW by default blocks
+ * indefinitely until space opens up. During this time no packets,
+ * including those from unrelated pipes, are processed. Enabling
+ * HOLB allows the IPA HW to drop packets as and when needed, so the
+ * indefinite blocking is avoided.
+ *
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_holb:	[in] IPA end-point HOLB configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
+{
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
+ ep_holb->tmr_val > ipa_ctx->ctrl->max_holb_tmr_val ||
+ ep_holb->en > 1) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ if (!ipa_ctx->ctrl->ipa_cfg_ep_holb) {
+ IPAERR("HOLB is not supported for this IPA core\n");
+ return -EINVAL;
+ }
+
+ ipa_ctx->ep[clnt_hdl].holb = *ep_holb;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
+ ep_holb->tmr_val);
+
+ return 0;
+}
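+
+/*
+ * Example: a minimal sketch of enabling HOLB drop on a consumer pipe,
+ * assuming `hdl` is a valid consumer client handle; the timer value is
+ * hypothetical and must not exceed ipa_ctx->ctrl->max_holb_tmr_val:
+ *
+ *	struct ipa_ep_cfg_holb holb;
+ *
+ *	memset(&holb, 0, sizeof(holb));
+ *	holb.en = 1;
+ *	holb.tmr_val = 100;
+ *	if (ipa2_cfg_ep_holb(hdl, &holb))
+ *		IPAERR("HOLB config failed on ep %u\n", hdl);
+ */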
+
+/**
+ * ipa2_cfg_ep_holb_by_client() - IPA end-point holb configuration
+ *
+ * Wrapper function for ipa2_cfg_ep_holb() that takes a client type
+ * instead of a client handle. This function is used for clients that do
+ * not have a client handle.
+ *
+ * @client: [in] client name
+ * @ep_holb:	[in] IPA end-point HOLB configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client,
+ const struct ipa_ep_cfg_holb *ep_holb)
+{
+ return ipa2_cfg_ep_holb(ipa2_get_ep_mapping(client), ep_holb);
+}
+
+static int _ipa_cfg_ep_deaggr_v1_1(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+ IPADBG("Not supported for version 1.1\n");
+ return 0;
+}
+
+static int _ipa_cfg_ep_deaggr_v2_0(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->deaggr_hdr_len,
+ IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_valid,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_location,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
+
+ IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->max_packet_len,
+ IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(clnt_hdl), reg_val);
+
+ return 0;
+}
+
+/**
+ * ipa2_cfg_ep_deaggr() - IPA end-point deaggregation configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_deaggr: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_deaggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d deaggr_hdr_len=%d\n",
+ clnt_hdl,
+ ep_deaggr->deaggr_hdr_len);
+
+ IPADBG("packet_offset_valid=%d\n",
+ ep_deaggr->packet_offset_valid);
+
+ IPADBG("packet_offset_location=%d max_packet_len=%d\n",
+ ep_deaggr->packet_offset_location,
+ ep_deaggr->max_packet_len);
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ /* copy over EP cfg */
+ ep->cfg.deaggr = *ep_deaggr;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+static void _ipa_cfg_ep_metadata_v1_1(u32 pipe_number,
+ const struct ipa_ep_cfg_metadata *meta)
+{
+ IPADBG("Not supported for version 1.1\n");
+}
+
+static void _ipa_cfg_ep_metadata_v2_0(u32 pipe_number,
+ const struct ipa_ep_cfg_metadata *meta)
+{
+ u32 reg_val = 0;
+
+ IPA_SETFIELD_IN_REG(reg_val, meta->qmap_id,
+ IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT,
+ IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK);
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_METADATA_n_OFST(pipe_number),
+ reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_metadata() - IPA end-point metadata configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_md:	[in] IPA end-point metadata configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
+{
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md);
+ ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
+ ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa2_cfg_ep_metadata);
+
+int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+ struct ipa_ep_cfg_metadata meta;
+ struct ipa_ep_context *ep;
+ int ipa_ep_idx;
+ int result = -EINVAL;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (param_in->client >= IPA_CLIENT_MAX) {
+ IPAERR("bad parm client:%d\n", param_in->client);
+ goto fail;
+ }
+
+ ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ goto fail;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+ if (!ep->valid) {
+ IPAERR("EP not allocated.\n");
+ goto fail;
+ }
+
+ meta.qmap_id = param_in->qmap_id;
+ if (param_in->client == IPA_CLIENT_USB_PROD ||
+ param_in->client == IPA_CLIENT_HSIC1_PROD ||
+ param_in->client == IPA_CLIENT_ODU_PROD) {
+ result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta);
+ } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
+ ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
+ result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
+ if (result)
+ IPAERR("qmap_id %d write failed on ep=%d\n",
+ meta.qmap_id, ipa_ep_idx);
+ result = 0;
+ }
+
+fail:
+ return result;
+}
+
+/**
+ * ipa_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+ int i;
+ u32 *cur = (u32 *)base;
+ u8 *byt;
+
+ IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
+ for (i = 0; i < size / 4; i++) {
+ byt = (u8 *)(cur + i);
+ IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
+ byt[0], byt[1], byt[2], byt[3]);
+ }
+ IPADBG("END\n");
+}
+
+/**
+ * ipa_rx_timeout_min_max_calc() - calc min/max timeout of rx polling
+ * @min: [out] rx polling min timeout
+ * @max: [out] rx polling max timeout
+ * @time: time from dtsi entry or from debugfs file system
+ *
+ * The maximum allowed time is 10 msec.
+ */
+void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time)
+{
+ if ((time >= MIN_RX_POLL_TIME) &&
+ (time <= MAX_RX_POLL_TIME)) {
+ *min = (time * MSEC) + LOWER_CUTOFF;
+ *max = (time * MSEC) + UPPER_CUTOFF;
+ } else {
+ /* Setting up the default min max time */
+ IPADBG("Setting up default rx polling timeout\n");
+ *min = (MIN_RX_POLL_TIME * MSEC) +
+ LOWER_CUTOFF;
+ *max = (MIN_RX_POLL_TIME * MSEC) +
+ UPPER_CUTOFF;
+ }
+ IPADBG("Rx polling timeout Min = %u len = %u\n", *min, *max);
+}
+
+/**
+ * ipa_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa_pipe_mem_init(u32 start_ofst, u32 size)
+{
+ int res;
+ u32 aligned_start_ofst;
+ u32 aligned_size;
+ struct gen_pool *pool;
+
+ if (!size) {
+ IPAERR("no IPA pipe memory allocated\n");
+ goto fail;
+ }
+
+ aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
+ aligned_size = size - (aligned_start_ofst - start_ofst);
+
+ IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+ start_ofst, aligned_start_ofst, size, aligned_size);
+
+ /* allocation order of 8 i.e. 128 bytes, global pool */
+ pool = gen_pool_create(8, -1);
+ if (!pool) {
+ IPAERR("Failed to create a new memory pool.\n");
+ goto fail;
+ }
+
+ res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+ if (res) {
+ IPAERR("Failed to add memory to IPA pipe pool\n");
+ goto err_pool_add;
+ }
+
+ ipa_ctx->pipe_mem_pool = pool;
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(pool);
+fail:
+ return -ENOMEM;
+}
+
+/**
+ * ipa_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: [out] allocated offset on success
+ * @size: allocation size in bytes
+ *
+ * Return value:
+ * 0: success, -1 on failure
+ */
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+ u32 vaddr;
+ int res = -1;
+
+ if (!ipa_ctx->pipe_mem_pool || !size) {
+ IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+ ipa_ctx->pipe_mem_pool);
+ return res;
+ }
+
+ vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
+
+ if (vaddr) {
+ *ofst = vaddr;
+ res = 0;
+ IPADBG("size=%u ofst=%u\n", size, vaddr);
+ } else {
+ IPAERR("size=%u failed\n", size);
+ }
+
+ return res;
+}
+
+/**
+ * ipa_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_free(u32 ofst, u32 size)
+{
+ IPADBG("size=%u ofst=%u\n", size, ofst);
+ if (ipa_ctx->pipe_mem_pool && size)
+ gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
+ return 0;
+}
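+
+/*
+ * Illustrative usage of the pipe memory pool (sketch only, not driver
+ * code): after a successful ipa_pipe_mem_init(), callers pair
+ * ipa_pipe_mem_alloc() with ipa_pipe_mem_free():
+ *
+ *	u32 ofst;
+ *
+ *	if (!ipa_pipe_mem_alloc(&ofst, len)) {
+ *		... use [ofst, ofst + len) ...
+ *		ipa_pipe_mem_free(ofst, len);
+ *	}
+ */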
+
+/**
+ * ipa2_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
+ * etc
+ *
+ * Returns: 0 on success
+ */
+int ipa2_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+ u32 reg_val;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
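+ /*
+ * Read-modify-write: bit 0 selects the aggregation mode while all
+ * other bits of the register are preserved.
+ */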
+ ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
+ (reg_val & 0xfffffffe));
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
+ * ipa2_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_set_qcncm_ndp_sig(char sig[3])
+{
+ u32 reg_val;
+
+ if (sig == NULL) {
+ IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
+ return -EINVAL;
+ }
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
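+ /*
+ * Illustrative example: for sig = "QND" ('Q' = 0x51, 'N' = 0x4e,
+ * 'D' = 0x44) the value written below is 0x51 << 20 | 0x4e << 12 |
+ * 0x44 << 4 = 0x0514e440, merged with the bits of reg_val kept by
+ * the 0xf000000f mask.
+ */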
+ ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 |
+ (sig[1] << 12) | (sig[2] << 4) |
+ (reg_val & 0xf000000f));
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
+ * ipa2_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable: [in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns: 0 on success
+ */
+int ipa2_set_single_ndp_per_mbim(bool enable)
+{
+ u32 reg_val;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
+ (enable & 0x1) | (reg_val & 0xfffffffe));
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
+ * ipa_set_hw_timer_fix_for_mbim_aggr() - Enable/disable HW timer fix
+ * for MBIM aggregation.
+ * @enable: [in] true to enable the HW fix; false otherwise
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable)
+{
+ u32 reg_val;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST,
+ (enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) |
+ (reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK));
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr);
+
+/**
+ * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+ u32 next_start;
+ u32 prev_end;
+
+ IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
+ next_start = (start + (boundary - 1)) & ~(boundary - 1);
+ prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
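+ /*
+ * Worked example: with boundary = 0x200, start = 0x100, end = 0x300,
+ * next_start rounds up to 0x200 and prev_end is 0x400 - 0x200 =
+ * 0x200, so the function returns 1 (the buffer crosses 0x200).
+ * For end = 0x1f0 instead, prev_end is 0 and the result is 0.
+ */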
+
+ while (next_start < prev_end)
+ next_start += boundary;
+
+ if (next_start == prev_end)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * ipa2_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
+ *
+ * Function is rate limited to avoid flooding kernel log buffer
+ */
+void ipa2_bam_reg_dump(void)
+{
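+ /* rate-limit: at most one dump every 500 seconds */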
+ static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
+
+ if (__ratelimit(&_rs)) {
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ pr_err("IPA BAM START\n");
+ if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
+ sps_get_bam_debug_info(ipa_ctx->bam_handle, 5,
+ 511950, 0, 0);
+ sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0,
+ 0, 0);
+ } else {
+ sps_get_bam_debug_info(ipa_ctx->bam_handle, 93,
+ (SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_CONS))
+ |
+ SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))),
+ 0, 2);
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ }
+}
+
+static void ipa_init_mem_partition_v2(void)
+{
+ IPADBG("Memory partition IPA 2\n");
+ IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
+ IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
+ IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
+ IPA_MEM_PART(nat_size));
+
+ IPA_MEM_PART(ofst_start) = IPA_MEM_v2_RAM_OFST_START;
+ IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+ IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_RAM_V4_FLT_OFST;
+ IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_RAM_V4_FLT_SIZE;
+ IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
+ IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
+ IPA_MEM_PART(v4_flt_size_ddr));
+
+ IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_RAM_V6_FLT_OFST;
+ IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_RAM_V6_FLT_SIZE;
+ IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
+ IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
+ IPA_MEM_PART(v6_flt_size_ddr));
+
+ IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_RAM_V4_RT_OFST;
+ IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
+
+ IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_RAM_V4_NUM_INDEX;
+ IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
+
+ IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_V4_MODEM_RT_INDEX_LO;
+ IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_V4_MODEM_RT_INDEX_HI;
+ IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v4_modem_rt_index_lo),
+ IPA_MEM_PART(v4_modem_rt_index_hi));
+
+ IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_V4_APPS_RT_INDEX_LO;
+ IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_V4_APPS_RT_INDEX_HI;
+ IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v4_apps_rt_index_lo),
+ IPA_MEM_PART(v4_apps_rt_index_hi));
+
+ IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_RAM_V4_RT_SIZE;
+ IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
+ IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
+ IPA_MEM_PART(v4_rt_size_ddr));
+
+ IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_RAM_V6_RT_OFST;
+ IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
+
+ IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_RAM_V6_NUM_INDEX;
+ IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
+
+ IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_V6_MODEM_RT_INDEX_LO;
+ IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_V6_MODEM_RT_INDEX_HI;
+ IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v6_modem_rt_index_lo),
+ IPA_MEM_PART(v6_modem_rt_index_hi));
+
+ IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_V6_APPS_RT_INDEX_LO;
+ IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_V6_APPS_RT_INDEX_HI;
+ IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v6_apps_rt_index_lo),
+ IPA_MEM_PART(v6_apps_rt_index_hi));
+
+ IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_RAM_V6_RT_SIZE;
+ IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
+ IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
+ IPA_MEM_PART(v6_rt_size_ddr));
+
+ IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_RAM_MODEM_HDR_OFST;
+ IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_RAM_MODEM_HDR_SIZE;
+ IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+ IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_RAM_APPS_HDR_OFST;
+ IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_RAM_APPS_HDR_SIZE;
+ IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_RAM_HDR_SIZE_DDR;
+ IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+ IPA_MEM_PART(apps_hdr_size_ddr));
+
+ IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_RAM_MODEM_OFST;
+ IPA_MEM_PART(modem_size) = IPA_MEM_v2_RAM_MODEM_SIZE;
+ IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+ IPA_MEM_PART(modem_size));
+
+ IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_RAM_APPS_V4_FLT_OFST;
+ IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE;
+ IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
+
+ IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_RAM_APPS_V6_FLT_OFST;
+ IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE;
+ IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
+
+ IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_RAM_UC_INFO_OFST;
+ IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_RAM_UC_INFO_SIZE;
+ IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
+ IPA_MEM_PART(uc_info_size));
+
+ IPA_MEM_PART(end_ofst) = IPA_MEM_v2_RAM_END_OFST;
+ IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_RAM_APPS_V4_RT_OFST;
+ IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_RAM_APPS_V4_RT_SIZE;
+ IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_RAM_APPS_V6_RT_OFST;
+ IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_RAM_APPS_V6_RT_SIZE;
+}
+
+static void ipa_init_mem_partition_v2_5(void)
+{
+ IPADBG("Memory partition IPA 2.5\n");
+ IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
+ IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
+ IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
+ IPA_MEM_PART(nat_size));
+
+ IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_5_RAM_UC_INFO_OFST;
+ IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_5_RAM_UC_INFO_SIZE;
+ IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
+ IPA_MEM_PART(uc_info_size));
+
+ IPA_MEM_PART(ofst_start) = IPA_MEM_v2_5_RAM_OFST_START;
+ IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+ IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_5_RAM_V4_FLT_OFST;
+ IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_5_RAM_V4_FLT_SIZE;
+ IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
+ IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
+ IPA_MEM_PART(v4_flt_size_ddr));
+
+ IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_5_RAM_V6_FLT_OFST;
+ IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_5_RAM_V6_FLT_SIZE;
+ IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
+ IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
+ IPA_MEM_PART(v6_flt_size_ddr));
+
+ IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_5_RAM_V4_RT_OFST;
+ IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
+
+ IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_5_RAM_V4_NUM_INDEX;
+ IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
+
+ IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO;
+ IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI;
+ IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v4_modem_rt_index_lo),
+ IPA_MEM_PART(v4_modem_rt_index_hi));
+
+ IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO;
+ IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI;
+ IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v4_apps_rt_index_lo),
+ IPA_MEM_PART(v4_apps_rt_index_hi));
+
+ IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_5_RAM_V4_RT_SIZE;
+ IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
+ IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
+ IPA_MEM_PART(v4_rt_size_ddr));
+
+ IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_5_RAM_V6_RT_OFST;
+ IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
+
+ IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_5_RAM_V6_NUM_INDEX;
+ IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
+
+ IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO;
+ IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI;
+ IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v6_modem_rt_index_lo),
+ IPA_MEM_PART(v6_modem_rt_index_hi));
+
+ IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO;
+ IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI;
+ IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v6_apps_rt_index_lo),
+ IPA_MEM_PART(v6_apps_rt_index_hi));
+
+ IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_5_RAM_V6_RT_SIZE;
+ IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
+ IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
+ IPA_MEM_PART(v6_rt_size_ddr));
+
+ IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_5_RAM_MODEM_HDR_OFST;
+ IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE;
+ IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+ IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_5_RAM_APPS_HDR_OFST;
+ IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_5_RAM_APPS_HDR_SIZE;
+ IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_5_RAM_HDR_SIZE_DDR;
+ IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+ IPA_MEM_PART(apps_hdr_size_ddr));
+
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst) =
+ IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST;
+ IPA_MEM_PART(modem_hdr_proc_ctx_size) =
+ IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE;
+ IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
+ IPA_MEM_PART(modem_hdr_proc_ctx_size));
+
+ IPA_MEM_PART(apps_hdr_proc_ctx_ofst) =
+ IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST;
+ IPA_MEM_PART(apps_hdr_proc_ctx_size) =
+ IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE;
+ IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr) =
+ IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR;
+ IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
+ IPA_MEM_PART(apps_hdr_proc_ctx_size),
+ IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
+
+ IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_5_RAM_MODEM_OFST;
+ IPA_MEM_PART(modem_size) = IPA_MEM_v2_5_RAM_MODEM_SIZE;
+ IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+ IPA_MEM_PART(modem_size));
+
+ IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST;
+ IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE;
+ IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
+
+ IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST;
+ IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE;
+ IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
+
+ IPA_MEM_PART(end_ofst) = IPA_MEM_v2_5_RAM_END_OFST;
+ IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST;
+ IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE;
+ IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST;
+ IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE;
+}
+
+static void ipa_init_mem_partition_v2_6L(void)
+{
+ IPADBG("Memory partition IPA 2.6Lite\n");
+ IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
+ IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
+ IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
+ IPA_MEM_PART(nat_size));
+
+ IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_6L_RAM_UC_INFO_OFST;
+ IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_6L_RAM_UC_INFO_SIZE;
+ IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
+ IPA_MEM_PART(uc_info_size));
+
+ IPA_MEM_PART(ofst_start) = IPA_MEM_v2_6L_RAM_OFST_START;
+ IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+ IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_6L_RAM_V4_FLT_OFST;
+ IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_6L_RAM_V4_FLT_SIZE;
+ IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
+ IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
+ IPA_MEM_PART(v4_flt_size_ddr));
+
+ IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_6L_RAM_V6_FLT_OFST;
+ IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_6L_RAM_V6_FLT_SIZE;
+ IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
+ IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
+ IPA_MEM_PART(v6_flt_size_ddr));
+
+ IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_6L_RAM_V4_RT_OFST;
+ IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
+
+ IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_6L_RAM_V4_NUM_INDEX;
+ IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
+
+ IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO;
+ IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI;
+ IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v4_modem_rt_index_lo),
+ IPA_MEM_PART(v4_modem_rt_index_hi));
+
+ IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO;
+ IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI;
+ IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v4_apps_rt_index_lo),
+ IPA_MEM_PART(v4_apps_rt_index_hi));
+
+ IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_6L_RAM_V4_RT_SIZE;
+ IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
+ IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
+ IPA_MEM_PART(v4_rt_size_ddr));
+
+ IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_6L_RAM_V6_RT_OFST;
+ IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
+
+ IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_6L_RAM_V6_NUM_INDEX;
+ IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
+
+ IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO;
+ IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI;
+ IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v6_modem_rt_index_lo),
+ IPA_MEM_PART(v6_modem_rt_index_hi));
+
+ IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO;
+ IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI;
+ IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v6_apps_rt_index_lo),
+ IPA_MEM_PART(v6_apps_rt_index_hi));
+
+ IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_6L_RAM_V6_RT_SIZE;
+ IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
+ IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
+ IPA_MEM_PART(v6_rt_size_ddr));
+
+ IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST;
+ IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE;
+ IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+ IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_6L_RAM_APPS_HDR_OFST;
+ IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE;
+ IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR;
+ IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+ IPA_MEM_PART(apps_hdr_size_ddr));
+
+ IPA_MEM_PART(modem_comp_decomp_ofst) =
+ IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST;
+ IPA_MEM_PART(modem_comp_decomp_size) =
+ IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE;
+ IPADBG("MODEM COMP DECOMP OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(modem_comp_decomp_ofst),
+ IPA_MEM_PART(modem_comp_decomp_size));
+
+ IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_6L_RAM_MODEM_OFST;
+ IPA_MEM_PART(modem_size) = IPA_MEM_v2_6L_RAM_MODEM_SIZE;
+ IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+ IPA_MEM_PART(modem_size));
+
+ IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST;
+ IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE;
+ IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
+
+ IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST;
+ IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE;
+ IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
+
+ IPA_MEM_PART(end_ofst) = IPA_MEM_v2_6L_RAM_END_OFST;
+ IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST;
+ IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE;
+ IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST;
+ IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE;
+}
+
+/**
+ * ipa_controller_shared_static_bind() - set the appropriate shared methods
+ * for IPA HW versions 2.0, 2.5, 2.6 and 2.6L
+ *
+ * @ctrl: data structure which holds the function pointers
+ */
+void ipa_controller_shared_static_bind(struct ipa_controller *ctrl)
+{
+ ctrl->ipa_init_rt4 = _ipa_init_rt4_v2;
+ ctrl->ipa_init_rt6 = _ipa_init_rt6_v2;
+ ctrl->ipa_init_flt4 = _ipa_init_flt4_v2;
+ ctrl->ipa_init_flt6 = _ipa_init_flt6_v2;
+ ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v2_0;
+ ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v2_0;
+ ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v2_0;
+ ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v2_0;
+ ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v2_0;
+ ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v2_0;
+ ctrl->ipa_cfg_route = _ipa_cfg_route_v2_0;
+ ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v2_0;
+ ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v2_0;
+ ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v2_0;
+ ctrl->ipa_clk_rate_turbo = IPA_V2_0_CLK_RATE_TURBO;
+ ctrl->ipa_clk_rate_nominal = IPA_V2_0_CLK_RATE_NOMINAL;
+ ctrl->ipa_clk_rate_svs = IPA_V2_0_CLK_RATE_SVS;
+ ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v2_0;
+ ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v2_0;
+ ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v2_0;
+ ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v2_0;
+ ctrl->ipa_commit_flt = __ipa_commit_flt_v2;
+ ctrl->ipa_commit_rt = __ipa_commit_rt_v2;
+ ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
+ ctrl->ipa_enable_clks = _ipa_enable_clks_v2_0;
+ ctrl->ipa_disable_clks = _ipa_disable_clks_v2_0;
+ ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v2_0;
+ ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v2_0;
+ ctrl->clock_scaling_bw_threshold_nominal =
+ IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS;
+ ctrl->clock_scaling_bw_threshold_turbo =
+ IPA_V2_0_BW_THRESHOLD_TURBO_MBPS;
+}
+
+/**
+ * ipa_controller_static_bind() - set the appropriate methods for
+ * IPA Driver based on the HW version
+ *
+ * @ctrl: data structure which holds the function pointers
+ * @hw_type: the HW type in use
+ *
+ * Note: the runtime assignments below could instead be expressed with
+ * C99 designated struct initializers, trading init time for memory.
+ */
+int ipa_controller_static_bind(struct ipa_controller *ctrl,
+ enum ipa_hw_type hw_type)
+{
+ switch (hw_type) {
+ case IPA_HW_v1_1:
+ ipa_init_mem_partition_v2();
+ ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v1_1;
+ ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v1_1;
+ ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v1_1;
+ ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v1_1;
+ ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v1_1;
+ ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v1_1;
+ ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v1_1;
+ ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v1_1;
+ ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v1_1;
+ ctrl->ipa_cfg_route = _ipa_cfg_route_v1_1;
+ ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v1_1;
+ ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v1_1;
+ ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v1_1;
+ ctrl->ipa_clk_rate_turbo = IPA_V1_1_CLK_RATE;
+ ctrl->ipa_clk_rate_nominal = IPA_V1_1_CLK_RATE;
+ ctrl->ipa_clk_rate_svs = IPA_V1_1_CLK_RATE;
+ ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v1_1;
+ ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v1_1;
+ ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v1_1;
+ ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v1_1;
+ ctrl->ipa_commit_flt = __ipa_commit_flt_v1_1;
+ ctrl->ipa_commit_rt = __ipa_commit_rt_v1_1;
+ ctrl->ipa_commit_hdr = __ipa_commit_hdr_v1_1;
+ ctrl->ipa_enable_clks = _ipa_enable_clks_v1_1;
+ ctrl->ipa_disable_clks = _ipa_disable_clks_v1_1;
+ ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v1_1;
+ ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v1_1;
+ ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
+ ctrl->max_holb_tmr_val = IPA_V1_MAX_HOLB_TMR_VAL;
+ break;
+ case IPA_HW_v2_0:
+ ipa_init_mem_partition_v2();
+ ipa_controller_shared_static_bind(ctrl);
+ ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_0;
+ ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
+ ctrl->max_holb_tmr_val = IPA_V2_0_MAX_HOLB_TMR_VAL;
+ ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_0;
+ ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_0;
+ ctrl->ipa_init_sram = _ipa_init_sram_v2;
+ ctrl->ipa_init_hdr = _ipa_init_hdr_v2;
+ ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
+ ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2;
+ break;
+ case IPA_HW_v2_5:
+ ipa_init_mem_partition_v2_5();
+ ipa_controller_shared_static_bind(ctrl);
+ ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_5;
+ ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_5;
+ ctrl->max_holb_tmr_val = IPA_V2_5_MAX_HOLB_TMR_VAL;
+ ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_5;
+ ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_5;
+ ctrl->ipa_init_sram = _ipa_init_sram_v2_5;
+ ctrl->ipa_init_hdr = _ipa_init_hdr_v2_5;
+ ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_5;
+ ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_5;
+ break;
+ case IPA_HW_v2_6L:
+ ipa_init_mem_partition_v2_6L();
+ ipa_controller_shared_static_bind(ctrl);
+ ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_6L;
+ ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_6L;
+ ctrl->max_holb_tmr_val = IPA_V2_6L_MAX_HOLB_TMR_VAL;
+ ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_6L;
+ ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_6L;
+ ctrl->ipa_init_sram = _ipa_init_sram_v2_6L;
+ ctrl->ipa_init_hdr = _ipa_init_hdr_v2_6L;
+ ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_6L;
+ ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_6L;
+ break;
+ default:
+ return -EPERM;
+ }
+
+ return 0;
+}
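+
+/*
+ * The C99 alternative mentioned in the kerneldoc above would look
+ * roughly like this (illustrative sketch only, not part of the driver):
+ *
+ *	static const struct ipa_controller ipa_ctrl_v2_0 = {
+ *		.ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v2_0,
+ *		.ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v2_0,
+ *		...
+ *	};
+ */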
+
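+/**
+ * ipa_skb_recycle() - reset an skb so its buffer can be reused
+ * @skb: socket buffer to recycle
+ *
+ * Clears the shared info (keeping a single dataref) and the skb header
+ * fields up to @tail, then points @data past the reserved headroom,
+ * mimicking a freshly allocated skb without reallocating the buffer.
+ */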
+void ipa_skb_recycle(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo;
+
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->data = skb->head + NET_SKB_PAD;
+ skb_reset_tail_pointer(skb);
+}
+
+int ipa_id_alloc(void *ptr)
+{
+ int id;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&ipa_ctx->idr_lock);
+ id = idr_alloc(&ipa_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
+ spin_unlock(&ipa_ctx->idr_lock);
+ idr_preload_end();
+
+ return id;
+}
+
+void *ipa_id_find(u32 id)
+{
+ void *ptr;
+
+ spin_lock(&ipa_ctx->idr_lock);
+ ptr = idr_find(&ipa_ctx->ipa_idr, id);
+ spin_unlock(&ipa_ctx->idr_lock);
+
+ return ptr;
+}
+
+void ipa_id_remove(u32 id)
+{
+ spin_lock(&ipa_ctx->idr_lock);
+ idr_remove(&ipa_ctx->ipa_idr, id);
+ spin_unlock(&ipa_ctx->idr_lock);
+}
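+
+/*
+ * Illustrative usage of the id allocator (sketch only, not driver
+ * code):
+ *
+ *	int id = ipa_id_alloc(obj);
+ *
+ *	if (id >= 0) {
+ *		... ipa_id_find(id) now returns obj ...
+ *		ipa_id_remove(id);
+ *	}
+ */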
+
+static void ipa_tag_free_buf(void *user1, int user2)
+{
+ kfree(user1);
+}
+
+static void ipa_tag_free_skb(void *user1, int user2)
+{
+ dev_kfree_skb_any((struct sk_buff *)user1);
+}
+
+#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
+
+/* ipa_tag_process() - Initiates a tag process. Incorporates the input
+ * descriptors
+ *
+ * @desc: descriptors with commands for IC
+ * @descs_num: number of descriptors in the above array
+ * @timeout: time to wait for the TAG response, in jiffies
+ *
+ * Note: The descriptors are copied (if there's room); the client must
+ * free its own descriptors afterwards
+ *
+ * Return: 0 or negative in case of failure
+ */
+int ipa_tag_process(struct ipa_desc desc[],
+ int descs_num,
+ unsigned long timeout)
+{
+ struct ipa_sys_context *sys;
+ struct ipa_desc *tag_desc;
+ int desc_idx = 0;
+ struct ipa_ip_packet_init *pkt_init;
+ struct ipa_register_write *reg_write_nop;
+ struct ipa_ip_packet_tag_status *status;
+ int i;
+ struct sk_buff *dummy_skb;
+ int res;
+ struct ipa_tag_completion *comp;
+ int ep_idx;
+
+ /* Not enough room for the required descriptors for the tag process */
+ if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
+ IPAERR("up to %d descriptors are allowed (received %d)\n",
+ IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
+ descs_num);
+ return -ENOMEM;
+ }
+
+ ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+ if (ep_idx == -1) {
+ IPAERR("Client %u is not mapped\n",
+ IPA_CLIENT_APPS_CMD_PROD);
+ return -EFAULT;
+ }
+ sys = ipa_ctx->ep[ep_idx].sys;
+
+ tag_desc = kcalloc(IPA_TAG_MAX_DESC, sizeof(*tag_desc), GFP_KERNEL);
+ if (!tag_desc) {
+ IPAERR("failed to allocate memory\n");
+ res = -ENOMEM;
+ goto fail_alloc_desc;
+ }
+
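+ /*
+ * Descriptor sequence built below (summary): IP_PACKET_INIT,
+ * REGISTER_WRITE no-op, IP_PACKET_TAG_STATUS, the caller's
+ * descriptors (if any), and finally a dummy skb whose payload is a
+ * pointer to the completion object signalled by the rx handler.
+ */
+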
+ /* IP_PACKET_INIT IC for tag status to be sent to apps */
+ pkt_init = kzalloc(sizeof(*pkt_init), GFP_KERNEL);
+ if (!pkt_init) {
+ IPAERR("failed to allocate memory\n");
+ res = -ENOMEM;
+ goto fail_alloc_pkt_init;
+ }
+
+ pkt_init->destination_pipe_index =
+ ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+
+ tag_desc[desc_idx].opcode = IPA_IP_PACKET_INIT;
+ tag_desc[desc_idx].pyld = pkt_init;
+ tag_desc[desc_idx].len = sizeof(*pkt_init);
+ tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ tag_desc[desc_idx].callback = ipa_tag_free_buf;
+ tag_desc[desc_idx].user1 = pkt_init;
+ desc_idx++;
+
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+ if (!reg_write_nop) {
+ IPAERR("no mem\n");
+ res = -ENOMEM;
+ goto fail_free_desc;
+ }
+
+ reg_write_nop->skip_pipeline_clear = 0;
+ reg_write_nop->value_mask = 0x0;
+
+ tag_desc[desc_idx].opcode = IPA_REGISTER_WRITE;
+ tag_desc[desc_idx].pyld = reg_write_nop;
+ tag_desc[desc_idx].len = sizeof(*reg_write_nop);
+ tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ tag_desc[desc_idx].callback = ipa_tag_free_buf;
+ tag_desc[desc_idx].user1 = reg_write_nop;
+ desc_idx++;
+
+ /* status IC */
+ status = kzalloc(sizeof(*status), GFP_KERNEL);
+ if (!status) {
+ IPAERR("no mem\n");
+ res = -ENOMEM;
+ goto fail_free_desc;
+ }
+
+ status->tag_f_2 = IPA_COOKIE;
+
+ tag_desc[desc_idx].opcode = IPA_IP_PACKET_TAG_STATUS;
+ tag_desc[desc_idx].pyld = status;
+ tag_desc[desc_idx].len = sizeof(*status);
+ tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ tag_desc[desc_idx].callback = ipa_tag_free_buf;
+ tag_desc[desc_idx].user1 = status;
+ desc_idx++;
+
+ /* Copy the required descriptors from the client now */
+ if (desc) {
+ memcpy(&(tag_desc[desc_idx]), desc, descs_num *
+ sizeof(struct ipa_desc));
+ desc_idx += descs_num;
+ }
+
+ comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+ if (!comp) {
+ IPAERR("no mem\n");
+ res = -ENOMEM;
+ goto fail_free_desc;
+ }
+ init_completion(&comp->comp);
+
+ /* completion needs to be released from both here and rx handler */
+ atomic_set(&comp->cnt, 2);
+
+ /* dummy packet to send to IPA. Payload is a pointer to the completion */
+ dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
+ if (!dummy_skb) {
+ IPAERR("failed to allocate memory\n");
+ res = -ENOMEM;
+ goto fail_free_comp;
+ }
+
+ memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
+
+ tag_desc[desc_idx].pyld = dummy_skb->data;
+ tag_desc[desc_idx].len = dummy_skb->len;
+ tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
+ tag_desc[desc_idx].callback = ipa_tag_free_skb;
+ tag_desc[desc_idx].user1 = dummy_skb;
+ desc_idx++;
+
+ /* send all descriptors to IPA with single EOT */
+ res = ipa_send(sys, desc_idx, tag_desc, true);
+ if (res) {
+ IPAERR("failed to send TAG packets %d\n", res);
+ res = -ENOMEM;
+ goto fail_send;
+ }
+ kfree(tag_desc);
+ tag_desc = NULL;
+
+ IPADBG("waiting for TAG response\n");
+ res = wait_for_completion_timeout(&comp->comp, timeout);
+ if (res == 0) {
+ IPAERR("timeout (%lu msec) on waiting for TAG response\n",
+ timeout);
+ WARN_ON(1);
+ if (atomic_dec_return(&comp->cnt) == 0)
+ kfree(comp);
+ return -ETIME;
+ }
+
+ IPADBG("TAG response arrived!\n");
+ if (atomic_dec_return(&comp->cnt) == 0)
+ kfree(comp);
+
+ /* sleep for short period to ensure IPA wrote all packets to BAM */
+ usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+ return 0;
+
+fail_send:
+ dev_kfree_skb_any(dummy_skb);
+ desc_idx--;
+fail_free_comp:
+ kfree(comp);
+fail_free_desc:
+ /*
+ * Free only the first descriptors allocated here
+ * [pkt_init, nop, status]; the caller remains responsible for
+ * freeing its own descriptors in case of failure.
+ * The min is required because we may fail during one of the
+ * initial allocations above.
+ */
+ for (i = 0; i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS - 1, desc_idx); i++)
+ kfree(tag_desc[i].user1);
+
+fail_alloc_pkt_init:
+ kfree(tag_desc);
+fail_alloc_desc:
+ return res;
+}
+
+/**
+ * ipa_tag_generate_force_close_desc() - generate descriptors for force close
+ * immediate command
+ *
+ * @desc: descriptors for IC
+ * @desc_size: desc array size
+ * @start_pipe: first pipe to close aggregation
+ * @end_pipe: last (non-inclusive) pipe to close aggregation
+ *
+ * Return: number of descriptors written or negative in case of failure
+ */
+static int ipa_tag_generate_force_close_desc(struct ipa_desc desc[],
+ int desc_size, int start_pipe, int end_pipe)
+{
+ int i;
+ u32 aggr_init;
+ int desc_idx = 0;
+ int res;
+ struct ipa_register_write *reg_write_agg_close;
+
+ for (i = start_pipe; i < end_pipe; i++) {
+ aggr_init = ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i));
+ if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
+ IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) != IPA_ENABLE_AGGR)
+ continue;
+ IPADBG("Force close ep: %d\n", i);
+ if (desc_idx + 1 > desc_size) {
+ IPAERR("Internal error - no descriptors\n");
+ res = -EFAULT;
+ goto fail_no_desc;
+ }
+
+ reg_write_agg_close = kzalloc(sizeof(*reg_write_agg_close),
+ GFP_KERNEL);
+ if (!reg_write_agg_close) {
+ IPAERR("no mem\n");
+ res = -ENOMEM;
+ goto fail_alloc_reg_write_agg_close;
+ }
+
+ reg_write_agg_close->skip_pipeline_clear = 0;
+ reg_write_agg_close->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i);
+ reg_write_agg_close->value =
+ (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+ reg_write_agg_close->value_mask =
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+
+ desc[desc_idx].opcode = IPA_REGISTER_WRITE;
+ desc[desc_idx].pyld = reg_write_agg_close;
+ desc[desc_idx].len = sizeof(*reg_write_agg_close);
+ desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ desc[desc_idx].callback = ipa_tag_free_buf;
+ desc[desc_idx].user1 = reg_write_agg_close;
+ desc_idx++;
+ }
+
+ return desc_idx;
+
+fail_alloc_reg_write_agg_close:
+ for (i = 0; i < desc_idx; i++)
+ kfree(desc[i].user1);
+fail_no_desc:
+ return res;
+}
+
+/**
+ * ipa_tag_aggr_force_close() - Force close aggregation
+ *
+ * @pipe_num: pipe number or -1 for all pipes
+ *
+ * Return: 0 on success, negative on failure
+ */
+int ipa_tag_aggr_force_close(int pipe_num)
+{
+ struct ipa_desc *desc;
+ int res = -1;
+ int start_pipe;
+ int end_pipe;
+ int num_descs;
+ int num_aggr_descs;
+
+ if (pipe_num < -1 || pipe_num >= (int)ipa_ctx->ipa_num_pipes) {
+ IPAERR("Invalid pipe number %d\n", pipe_num);
+ return -EINVAL;
+ }
+
+ if (pipe_num == -1) {
+ start_pipe = 0;
+ end_pipe = ipa_ctx->ipa_num_pipes;
+ } else {
+ start_pipe = pipe_num;
+ end_pipe = pipe_num + 1;
+ }
+
+ num_descs = end_pipe - start_pipe;
+
+ desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ IPAERR("no mem\n");
+ return -ENOMEM;
+ }
+
+ /* Force close aggregation on all valid pipes with aggregation */
+ num_aggr_descs = ipa_tag_generate_force_close_desc(desc, num_descs,
+ start_pipe, end_pipe);
+ if (num_aggr_descs < 0) {
+ IPAERR("ipa_tag_generate_force_close_desc failed %d\n",
+ num_aggr_descs);
+ goto fail_free_desc;
+ }
+
+ res = ipa_tag_process(desc, num_aggr_descs,
+ IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
+
+fail_free_desc:
+ kfree(desc);
+
+ return res;
+}
+
+/**
+ * ipa2_is_ready() - check if IPA module was initialized
+ * successfully
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa2_is_ready(void)
+{
+ return ipa_ctx != NULL;
+}
+
+/**
+ * ipa2_is_client_handle_valid() - check if IPA client handle is valid handle
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa2_is_client_handle_valid(u32 clnt_hdl)
+{
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return false;
+ }
+
+ if (clnt_hdl < ipa_ctx->ipa_num_pipes)
+ return true;
+ return false;
+}
+
+/**
+ * ipa2_proxy_clk_unvote() - called to remove IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa2_proxy_clk_unvote(void)
+{
+ if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) {
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
+ ipa_ctx->q6_proxy_clk_vote_valid = false;
+ }
+}
+
+/**
+ * ipa2_proxy_clk_vote() - called to add IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa2_proxy_clk_vote(void)
+{
+ if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) {
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
+ ipa_ctx->q6_proxy_clk_vote_valid = true;
+ }
+}
+
+
+/**
+ * ipa2_get_smem_restr_bytes()- Return IPA smem restricted bytes
+ *
+ * Return value: u16 - number of IPA smem restricted bytes
+ */
+u16 ipa2_get_smem_restr_bytes(void)
+{
+ if (ipa_ctx)
+ return ipa_ctx->smem_restricted_bytes;
+
+ IPAERR("IPA Driver not initialized\n");
+
+ return 0;
+}
+
+/**
+ * ipa2_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt
+ *
+ * Return value: true if modem configures embedded pipe flt, false otherwise
+ */
+bool ipa2_get_modem_cfg_emb_pipe_flt(void)
+{
+ if (ipa_ctx)
+ return ipa_ctx->modem_cfg_emb_pipe_flt;
+
+ IPAERR("IPA driver has not been initialized\n");
+
+ return false;
+}
+
+/**
+ * ipa2_get_transport_type()- Return IPA_TRANSPORT_TYPE_SPS
+ *
+ * Return value: enum ipa_transport_type
+ */
+enum ipa_transport_type ipa2_get_transport_type(void)
+{
+ return IPA_TRANSPORT_TYPE_SPS;
+}
+
+u32 ipa_get_num_pipes(void)
+{
+ if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L)
+ return ipa_read_reg(ipa_ctx->mmio, IPA_ENABLED_PIPES_OFST);
+ else
+ return IPA_MAX_NUM_PIPES;
+}
+EXPORT_SYMBOL(ipa_get_num_pipes);
+
+/**
+ * ipa2_disable_apps_wan_cons_deaggr() - validate the requested aggregation
+ * limits and set ipa_ctx->ipa_client_apps_wan_cons_agg_gro
+ * @agg_size: [in] requested aggregation byte limit
+ * @agg_count: [in] requested aggregation packet limit
+ *
+ * Return value: 0 or negative in case of failure
+ */
+int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
+{
+ int res = -1;
+
+ /*
+ * Check whether IPA HW can support the requested limits;
+ * the HW byte limit is expressed in KB, hence the >> 10.
+ */
+ if ((agg_size >> 10) > IPA_AGGR_BYTE_LIMIT) {
+ IPAWANERR("agg_size %u exceeds IPA-AGG byte limit %d\n",
+ agg_size, IPA_AGGR_BYTE_LIMIT);
+ return res;
+ }
+ if (agg_count > IPA_AGGR_PKT_LIMIT) {
+ IPAWANERR("agg_count %u exceeds IPA-AGG pkt limit %d\n",
+ agg_count, IPA_AGGR_PKT_LIMIT);
+ return res;
+ }
+
+ if (ipa_ctx) {
+ ipa_ctx->ipa_client_apps_wan_cons_agg_gro = true;
+ return 0;
+ }
+ return res;
+}
+
+static struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info(int ipa_ep_idx)
+{
+ IPAERR("Not supported for IPA 2.x\n");
+ return NULL;
+}
+
+static int ipa2_stop_gsi_channel(u32 clnt_hdl)
+{
+ IPAERR("Not supported for IPA 2.x\n");
+ return -EFAULT;
+}
+
+static void *ipa2_get_ipc_logbuf(void)
+{
+ /* no support for IPC logging in IPAv2 */
+ return NULL;
+}
+
+static void *ipa2_get_ipc_logbuf_low(void)
+{
+ /* no support for IPC logging in IPAv2 */
+ return NULL;
+}
+
+static void ipa2_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+ *holb = ipa_ctx->ep[ep_idx].holb;
+}
+
+static int ipa2_generate_tag_process(void)
+{
+ int res;
+
+ res = ipa_tag_process(NULL, 0, HZ);
+ if (res)
+ IPAERR("TAG process failed\n");
+
+ return res;
+}
+
+static void ipa2_set_tag_process_before_gating(bool val)
+{
+ ipa_ctx->tag_process_before_gating = val;
+}
+
+int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+ struct ipa_api_controller *api_ctrl)
+{
+ if (ipa_hw_type < IPA_HW_v2_0 || ipa_hw_type >= IPA_HW_v3_0) {
+ IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ api_ctrl->ipa_connect = ipa2_connect;
+ api_ctrl->ipa_disconnect = ipa2_disconnect;
+ api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint;
+ api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay;
+ api_ctrl->ipa_disable_endpoint = ipa2_disable_endpoint;
+ api_ctrl->ipa_cfg_ep = ipa2_cfg_ep;
+ api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat;
+ api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr;
+ api_ctrl->ipa_cfg_ep_hdr_ext = ipa2_cfg_ep_hdr_ext;
+ api_ctrl->ipa_cfg_ep_mode = ipa2_cfg_ep_mode;
+ api_ctrl->ipa_cfg_ep_aggr = ipa2_cfg_ep_aggr;
+ api_ctrl->ipa_cfg_ep_deaggr = ipa2_cfg_ep_deaggr;
+ api_ctrl->ipa_cfg_ep_route = ipa2_cfg_ep_route;
+ api_ctrl->ipa_cfg_ep_holb = ipa2_cfg_ep_holb;
+ api_ctrl->ipa_get_holb = ipa2_get_holb;
+ api_ctrl->ipa_set_tag_process_before_gating =
+ ipa2_set_tag_process_before_gating;
+ api_ctrl->ipa_cfg_ep_cfg = ipa2_cfg_ep_cfg;
+ api_ctrl->ipa_cfg_ep_metadata_mask = ipa2_cfg_ep_metadata_mask;
+ api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client;
+ api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl;
+ api_ctrl->ipa_add_hdr = ipa2_add_hdr;
+ api_ctrl->ipa_del_hdr = ipa2_del_hdr;
+ api_ctrl->ipa_commit_hdr = ipa2_commit_hdr;
+ api_ctrl->ipa_reset_hdr = ipa2_reset_hdr;
+ api_ctrl->ipa_get_hdr = ipa2_get_hdr;
+ api_ctrl->ipa_put_hdr = ipa2_put_hdr;
+ api_ctrl->ipa_copy_hdr = ipa2_copy_hdr;
+ api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx;
+ api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx;
+ api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule;
+ api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule;
+ api_ctrl->ipa_commit_rt = ipa2_commit_rt;
+ api_ctrl->ipa_reset_rt = ipa2_reset_rt;
+ api_ctrl->ipa_get_rt_tbl = ipa2_get_rt_tbl;
+ api_ctrl->ipa_put_rt_tbl = ipa2_put_rt_tbl;
+ api_ctrl->ipa_query_rt_index = ipa2_query_rt_index;
+ api_ctrl->ipa_mdfy_rt_rule = ipa2_mdfy_rt_rule;
+ api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule;
+ api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule;
+ api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule;
+ api_ctrl->ipa_commit_flt = ipa2_commit_flt;
+ api_ctrl->ipa_reset_flt = ipa2_reset_flt;
+ api_ctrl->allocate_nat_device = ipa2_allocate_nat_device;
+ api_ctrl->ipa_nat_init_cmd = ipa2_nat_init_cmd;
+ api_ctrl->ipa_nat_dma_cmd = ipa2_nat_dma_cmd;
+ api_ctrl->ipa_nat_del_cmd = ipa2_nat_del_cmd;
+ api_ctrl->ipa_send_msg = ipa2_send_msg;
+ api_ctrl->ipa_register_pull_msg = ipa2_register_pull_msg;
+ api_ctrl->ipa_deregister_pull_msg = ipa2_deregister_pull_msg;
+ api_ctrl->ipa_register_intf = ipa2_register_intf;
+ api_ctrl->ipa_register_intf_ext = ipa2_register_intf_ext;
+ api_ctrl->ipa_deregister_intf = ipa2_deregister_intf;
+ api_ctrl->ipa_set_aggr_mode = ipa2_set_aggr_mode;
+ api_ctrl->ipa_set_qcncm_ndp_sig = ipa2_set_qcncm_ndp_sig;
+ api_ctrl->ipa_set_single_ndp_per_mbim = ipa2_set_single_ndp_per_mbim;
+ api_ctrl->ipa_tx_dp = ipa2_tx_dp;
+ api_ctrl->ipa_tx_dp_mul = ipa2_tx_dp_mul;
+ api_ctrl->ipa_free_skb = ipa2_free_skb;
+ api_ctrl->ipa_setup_sys_pipe = ipa2_setup_sys_pipe;
+ api_ctrl->ipa_teardown_sys_pipe = ipa2_teardown_sys_pipe;
+ api_ctrl->ipa_sys_update_gsi_hdls = ipa2_sys_update_gsi_hdls;
+ api_ctrl->ipa_sys_setup = ipa2_sys_setup;
+ api_ctrl->ipa_sys_teardown = ipa2_sys_teardown;
+ api_ctrl->ipa_connect_wdi_pipe = ipa2_connect_wdi_pipe;
+ api_ctrl->ipa_disconnect_wdi_pipe = ipa2_disconnect_wdi_pipe;
+ api_ctrl->ipa_enable_wdi_pipe = ipa2_enable_wdi_pipe;
+ api_ctrl->ipa_disable_wdi_pipe = ipa2_disable_wdi_pipe;
+ api_ctrl->ipa_resume_wdi_pipe = ipa2_resume_wdi_pipe;
+ api_ctrl->ipa_suspend_wdi_pipe = ipa2_suspend_wdi_pipe;
+ api_ctrl->ipa_get_wdi_stats = ipa2_get_wdi_stats;
+ api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes;
+ api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa;
+ api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB;
+ api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB;
+ api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping;
+ api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping;
+ api_ctrl->teth_bridge_init = ipa2_teth_bridge_init;
+ api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect;
+ api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect;
+ api_ctrl->ipa_set_client = ipa2_set_client;
+ api_ctrl->ipa_get_client = ipa2_get_client;
+ api_ctrl->ipa_get_client_uplink = ipa2_get_client_uplink;
+ api_ctrl->ipa_dma_init = ipa2_dma_init;
+ api_ctrl->ipa_dma_enable = ipa2_dma_enable;
+ api_ctrl->ipa_dma_disable = ipa2_dma_disable;
+ api_ctrl->ipa_dma_sync_memcpy = ipa2_dma_sync_memcpy;
+ api_ctrl->ipa_dma_async_memcpy = ipa2_dma_async_memcpy;
+ api_ctrl->ipa_dma_uc_memcpy = ipa2_dma_uc_memcpy;
+ api_ctrl->ipa_dma_destroy = ipa2_dma_destroy;
+ api_ctrl->ipa_mhi_init_engine = ipa2_mhi_init_engine;
+ api_ctrl->ipa_connect_mhi_pipe = ipa2_connect_mhi_pipe;
+ api_ctrl->ipa_disconnect_mhi_pipe = ipa2_disconnect_mhi_pipe;
+ api_ctrl->ipa_uc_mhi_reset_channel = ipa2_uc_mhi_reset_channel;
+ api_ctrl->ipa_mhi_sps_channel_empty = ipa2_mhi_sps_channel_empty;
+ api_ctrl->ipa_generate_tag_process = ipa2_generate_tag_process;
+ api_ctrl->ipa_disable_sps_pipe = ipa2_disable_sps_pipe;
+ api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
+ qmi_enable_force_clear_datapath_send;
+ api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
+ qmi_disable_force_clear_datapath_send;
+ api_ctrl->ipa_mhi_reset_channel_internal =
+ ipa2_mhi_reset_channel_internal;
+ api_ctrl->ipa_mhi_start_channel_internal =
+ ipa2_mhi_start_channel_internal;
+ api_ctrl->ipa_mhi_resume_channels_internal =
+ ipa2_mhi_resume_channels_internal;
+ api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
+ ipa2_uc_mhi_send_dl_ul_sync_info;
+ api_ctrl->ipa_uc_mhi_init = ipa2_uc_mhi_init;
+ api_ctrl->ipa_uc_mhi_suspend_channel = ipa2_uc_mhi_suspend_channel;
+ api_ctrl->ipa_uc_mhi_stop_event_update_channel =
+ ipa2_uc_mhi_stop_event_update_channel;
+ api_ctrl->ipa_uc_mhi_cleanup = ipa2_uc_mhi_cleanup;
+ api_ctrl->ipa_uc_mhi_print_stats = ipa2_uc_mhi_print_stats;
+ api_ctrl->ipa_uc_state_check = ipa2_uc_state_check;
+ api_ctrl->ipa_write_qmap_id = ipa2_write_qmap_id;
+ api_ctrl->ipa_add_interrupt_handler = ipa2_add_interrupt_handler;
+ api_ctrl->ipa_remove_interrupt_handler = ipa2_remove_interrupt_handler;
+ api_ctrl->ipa_restore_suspend_handler = ipa2_restore_suspend_handler;
+ api_ctrl->ipa_bam_reg_dump = ipa2_bam_reg_dump;
+ api_ctrl->ipa_get_ep_mapping = ipa2_get_ep_mapping;
+ api_ctrl->ipa_is_ready = ipa2_is_ready;
+ api_ctrl->ipa_proxy_clk_vote = ipa2_proxy_clk_vote;
+ api_ctrl->ipa_proxy_clk_unvote = ipa2_proxy_clk_unvote;
+ api_ctrl->ipa_is_client_handle_valid = ipa2_is_client_handle_valid;
+ api_ctrl->ipa_get_client_mapping = ipa2_get_client_mapping;
+ api_ctrl->ipa_get_rm_resource_from_ep = ipa2_get_rm_resource_from_ep;
+ api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
+ ipa2_get_modem_cfg_emb_pipe_flt;
+ api_ctrl->ipa_get_transport_type = ipa2_get_transport_type;
+ api_ctrl->ipa_ap_suspend = ipa2_ap_suspend;
+ api_ctrl->ipa_ap_resume = ipa2_ap_resume;
+ api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain;
+ api_ctrl->ipa_disable_apps_wan_cons_deaggr =
+ ipa2_disable_apps_wan_cons_deaggr;
+ api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev;
+ api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info;
+ api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel;
+ api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb;
+ api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks;
+ api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks;
+ api_ctrl->ipa_inc_client_enable_clks_no_block =
+ ipa2_inc_client_enable_clks_no_block;
+ api_ctrl->ipa_suspend_resource_no_block =
+ ipa2_suspend_resource_no_block;
+ api_ctrl->ipa_resume_resource = ipa2_resume_resource;
+ api_ctrl->ipa_suspend_resource_sync = ipa2_suspend_resource_sync;
+ api_ctrl->ipa_set_required_perf_profile =
+ ipa2_set_required_perf_profile;
+ api_ctrl->ipa_get_ipc_logbuf = ipa2_get_ipc_logbuf;
+ api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
+ api_ctrl->ipa_rx_poll = ipa2_rx_poll;
+ api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
+ api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes;
+ api_ctrl->ipa_tear_down_uc_offload_pipes =
+ ipa2_tear_down_uc_offload_pipes;
+
+ return 0;
+}
+
+/**
+ * ipa_get_sys_yellow_wm()- Return yellow WM value for IPA SYS pipes.
+ * @sys: [in] system pipe context used to scale the default watermark
+ *
+ * Return value: the IPA_YELLOW_MARKER_SYS_CFG_OFST register value on
+ * IPA_HW_v2_6L; IPA_DEFAULT_SYS_YELLOW_WM scaled by the pipe's rx buffer
+ * size otherwise (0 if @sys is NULL).
+ */
+u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys)
+{
+ if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
+ return ipa_read_reg(ipa_ctx->mmio,
+ IPA_YELLOW_MARKER_SYS_CFG_OFST);
+ } else {
+ if (!sys)
+ return 0;
+
+ return IPA_DEFAULT_SYS_YELLOW_WM * sys->rx_buff_sz;
+ }
+}
+EXPORT_SYMBOL(ipa_get_sys_yellow_wm);
+
+void ipa_suspend_apps_pipes(bool suspend)
+{
+ struct ipa_ep_cfg_ctrl cfg;
+ int ipa_ep_idx;
+ u32 lan_empty = 0, wan_empty = 0;
+ int ret;
+ struct sps_event_notify notify;
+ struct ipa_ep_context *ep;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.ipa_ep_suspend = suspend;
+
+ ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ /* The LAN consumer may be unmapped (e.g. during SSR). */
+ ep = (ipa_ep_idx == -1) ? NULL : &ipa_ctx->ep[ipa_ep_idx];
+ if (ep && ep->valid) {
+ ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ /* Check if the pipes are empty. */
+ ret = sps_is_pipe_empty(ep->ep_hdl, &lan_empty);
+ if (ret) {
+ IPAERR("%s: sps_is_pipe_empty failed with %d\n",
+ __func__, ret);
+ }
+ if (!lan_empty) {
+ IPADBG("LAN Cons is not-empty. Enter poll mode.\n");
+ notify.user = ep->sys;
+ notify.event_id = SPS_EVENT_EOT;
+ if (ep->sys->sps_callback)
+ ep->sys->sps_callback(¬ify);
+ }
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+ /* The WAN consumer may be unmapped during SSR. */
+ if (ipa_ep_idx == -1) {
+ IPADBG("Invalid client.\n");
+ return;
+ }
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+ if (ep->valid) {
+ ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ /* Check if the pipes are empty. */
+ ret = sps_is_pipe_empty(ep->ep_hdl, &wan_empty);
+ if (ret) {
+ IPAERR("%s: sps_is_pipe_empty failed with %d\n",
+ __func__, ret);
+ }
+ if (!wan_empty) {
+ IPADBG("WAN Cons is not-empty. Enter poll mode.\n");
+ notify.user = ep->sys;
+ notify.event_id = SPS_EVENT_EOT;
+ if (ep->sys->sps_callback)
+ ep->sys->sps_callback(¬ify);
+ }
+ }
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
new file mode 100644
index 0000000..41f339a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -0,0 +1,2892 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * WWAN Transport Network Driver.
+ */
+
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/pkt_sched.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include "ipa_qmi_service.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include <linux/ipa.h>
+#include <uapi/linux/net_map.h>
+
+#include "ipa_trace.h"
+
+#define WWAN_METADATA_SHFT 24
+#define WWAN_METADATA_MASK 0xFF000000
+#define WWAN_DATA_LEN 2000
+#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
+#define HEADROOM_FOR_QMAP 8 /* for mux header */
+#define TAILROOM 0 /* for padding by mux layer */
+#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
+#define UL_FILTER_RULE_HANDLE_START 69
+#define DEFAULT_OUTSTANDING_HIGH_CTL 96
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+
+#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+#define IPA_WWAN_DEVICE_COUNT (1)
+
+#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
+
+#define INVALID_MUX_ID 0xFF
+#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
+#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
+#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
+
+#define NAPI_WEIGHT 60
+
+static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
+static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
+static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
+static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
+static int num_q6_rule, old_num_q6_rule;
+static int rmnet_index;
+static bool egress_set, a7_ul_flt_set;
+static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/
+static atomic_t is_initialized;
+static atomic_t is_ssr;
+static void *subsys_notify_handle;
+
+u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* handles obtained from ipa */
+static struct mutex ipa_to_apps_pipe_handle_guard;
+static int wwan_add_ul_flt_rule_to_ipa(void);
+static int wwan_del_ul_flt_rule_to_ipa(void);
+static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type);
+static void ipa_rmnet_rx_cb(void *priv);
+static int ipa_rmnet_poll(struct napi_struct *napi, int budget);
+
+static void wake_tx_queue(struct work_struct *work);
+static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);
+
+static void tethering_stats_poll_queue(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
+ tethering_stats_poll_queue);
+
+enum wwan_device_status {
+ WWAN_DEVICE_INACTIVE = 0,
+ WWAN_DEVICE_ACTIVE = 1
+};
+
+struct ipa_rmnet_plat_drv_res {
+ bool ipa_rmnet_ssr;
+ bool ipa_loaduC;
+ bool ipa_advertise_sg_support;
+ bool ipa_napi_enable;
+};
+
+static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
+/**
+ * struct wwan_private - WWAN private data
+ * @net: network interface struct implemented by this driver
+ * @stats: iface statistics
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high_ctl: maximum outstanding packets allowed for QMAP
+ * control traffic
+ * @outstanding_high: number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets below which a stopped
+ * queue is woken up again
+ * @ch_id: channel id
+ * @lock: spinlock for mutual exclusion
+ * @resource_granted_completion: signaled once IPA RM grants the resource
+ * @device_status: holds device status
+ * @napi: NAPI context for the Rx path
+ *
+ * WWAN private - holds all relevant info about WWAN driver
+ */
+struct wwan_private {
+ struct net_device *net;
+ struct net_device_stats stats;
+ atomic_t outstanding_pkts;
+ int outstanding_high_ctl;
+ int outstanding_high;
+ int outstanding_low;
+ uint32_t ch_id;
+ spinlock_t lock;
+ struct completion resource_granted_completion;
+ enum wwan_device_status device_status;
+ struct napi_struct napi;
+};
+
+/**
+ * ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
+static int ipa_setup_a7_qmap_hdr(void)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_hdr_add *hdr_entry;
+ u32 pyld_sz;
+ int ret;
+
+ /* install the basic exception header */
+ pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+ sizeof(struct ipa_hdr_add);
+ hdr = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!hdr) {
+ IPAWANERR("fail to alloc exception hdr\n");
+ return -ENOMEM;
+ }
+ hdr->num_hdrs = 1;
+ hdr->commit = 1;
+ hdr_entry = &hdr->hdr[0];
+
+ strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+
+ if (ipa2_add_hdr(hdr)) {
+ IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ if (hdr_entry->status) {
+ IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+ qmap_hdr_hdl = hdr_entry->hdr_hdl;
+
+ ret = 0;
+bail:
+ kfree(hdr);
+ return ret;
+}
+
+static void ipa_del_a7_qmap_hdr(void)
+{
+ struct ipa_ioc_del_hdr *del_hdr;
+ struct ipa_hdr_del *hdl_entry;
+ u32 pyld_sz;
+ int ret;
+
+ pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+ sizeof(struct ipa_hdr_del);
+ del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!del_hdr) {
+ IPAWANERR("fail to alloc exception hdr_del\n");
+ return;
+ }
+
+ del_hdr->commit = 1;
+ del_hdr->num_hdls = 1;
+ hdl_entry = &del_hdr->hdl[0];
+ hdl_entry->hdl = qmap_hdr_hdl;
+
+ ret = ipa2_del_hdr(del_hdr);
+ if (ret || hdl_entry->status)
+ IPAWANERR("ipa2_del_hdr failed\n");
+ else
+ IPAWANDBG("hdrs deletion done\n");
+
+ qmap_hdr_hdl = 0;
+ kfree(del_hdr);
+}
+
+static void ipa_del_qmap_hdr(uint32_t hdr_hdl)
+{
+ struct ipa_ioc_del_hdr *del_hdr;
+ struct ipa_hdr_del *hdl_entry;
+ u32 pyld_sz;
+ int ret;
+
+ if (hdr_hdl == 0) {
+ IPAWANERR("Invalid hdr_hdl provided\n");
+ return;
+ }
+
+ pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+ sizeof(struct ipa_hdr_del);
+ del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!del_hdr) {
+ IPAWANERR("fail to alloc exception hdr_del\n");
+ return;
+ }
+
+ del_hdr->commit = 1;
+ del_hdr->num_hdls = 1;
+ hdl_entry = &del_hdr->hdl[0];
+ hdl_entry->hdl = hdr_hdl;
+
+ ret = ipa2_del_hdr(del_hdr);
+ if (ret || hdl_entry->status)
+ IPAWANERR("ipa2_del_hdr failed\n");
+ else
+ IPAWANDBG("header deletion done\n");
+
+ qmap_hdr_hdl = 0;
+ kfree(del_hdr);
+}
+
+static void ipa_del_mux_qmap_hdrs(void)
+{
+ int index;
+
+ for (index = 0; index < rmnet_index; index++) {
+ ipa_del_qmap_hdr(mux_channel[index].hdr_hdl);
+ mux_channel[index].hdr_hdl = 0;
+ }
+}
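+
+/*
+ * Assumed 4-byte QMAP header layout matching IPA_QMAP_HEADER_LENGTH (a
+ * sketch for reference; only byte 1, the mux_id, is filled in below):
+ *
+ *	struct qmap_hdr {
+ *		u8 pad_cd;		bit 7: C/D bit, bits 0-5: pad len
+ *		u8 mux_id;		logical rmnet channel id
+ *		__be16 pkt_len;		payload length incl. padding
+ *	} __packed;
+ *
+ * ipa_add_qmap_hdr() installs one such template per mux channel so the
+ * HW can prepend it on the DL path.
+ */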
+
+static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_hdr_add *hdr_entry;
+ char hdr_name[IPA_RESOURCE_NAME_MAX];
+ u32 pyld_sz;
+ int ret;
+
+ pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+ sizeof(struct ipa_hdr_add);
+ hdr = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!hdr) {
+ IPAWANERR("fail to alloc exception hdr\n");
+ return -ENOMEM;
+ }
+ hdr->num_hdrs = 1;
+ hdr->commit = 1;
+ hdr_entry = &hdr->hdr[0];
+
+ snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF,
+ mux_id);
+ strlcpy(hdr_entry->name, hdr_name,
+ IPA_RESOURCE_NAME_MAX);
+
+ hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+ hdr_entry->hdr[1] = (uint8_t) mux_id;
+ IPAWANDBG("header (%s) with mux-id: (%d)\n",
+ hdr_name,
+ hdr_entry->hdr[1]);
+ if (ipa2_add_hdr(hdr)) {
+ IPAWANERR("fail to add IPA_QMAP hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ if (hdr_entry->status) {
+ IPAWANERR("fail to add IPA_QMAP hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+ *hdr_hdl = hdr_entry->hdr_hdl;
+bail:
+ kfree(hdr);
+ return ret;
+}
+
+/**
+ * ipa_setup_dflt_wan_rt_tables() - Setup default wan routing tables
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
+static int ipa_setup_dflt_wan_rt_tables(void)
+{
+ struct ipa_ioc_add_rt_rule *rt_rule;
+ struct ipa_rt_rule_add *rt_rule_entry;
+
+ rt_rule =
+ kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+ sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+ if (!rt_rule) {
+ IPAWANERR("fail to alloc mem\n");
+ return -ENOMEM;
+ }
+ /* setup a default v4 route to point to Apps */
+ rt_rule->num_rules = 1;
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v4;
+ strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
+ IPA_RESOURCE_NAME_MAX);
+
+ rt_rule_entry = &rt_rule->rules[0];
+ rt_rule_entry->at_rear = 1;
+ rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
+ rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl;
+
+ if (ipa2_add_rt_rule(rt_rule)) {
+ IPAWANERR("fail to add dflt_wan v4 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+
+ IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+ /* setup a default v6 route to point to A5 */
+ rt_rule->ip = IPA_IP_v6;
+ if (ipa2_add_rt_rule(rt_rule)) {
+ IPAWANERR("fail to add dflt_wan v6 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+ kfree(rt_rule);
+ return 0;
+}
+
+static void ipa_del_dflt_wan_rt_tables(void)
+{
+ struct ipa_ioc_del_rt_rule *rt_rule;
+ struct ipa_rt_rule_del *rt_rule_entry;
+ int len;
+
+ len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
+ sizeof(struct ipa_rt_rule_del);
+ rt_rule = kzalloc(len, GFP_KERNEL);
+ if (!rt_rule) {
+ IPAWANERR("unable to allocate memory for del route rule\n");
+ return;
+ }
+
+ memset(rt_rule, 0, len);
+ rt_rule->commit = 1;
+ rt_rule->num_hdls = 1;
+ rt_rule->ip = IPA_IP_v4;
+
+ rt_rule_entry = &rt_rule->hdl[0];
+ rt_rule_entry->status = -1;
+ rt_rule_entry->hdl = dflt_v4_wan_rt_hdl;
+
+ IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+ rt_rule_entry->hdl, IPA_IP_v4);
+ if (ipa2_del_rt_rule(rt_rule) ||
+ (rt_rule_entry->status)) {
+ IPAWANERR("Routing rule deletion failed!\n");
+ }
+
+ rt_rule->ip = IPA_IP_v6;
+ rt_rule_entry->hdl = dflt_v6_wan_rt_hdl;
+ IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+ rt_rule_entry->hdl, IPA_IP_v6);
+ if (ipa2_del_rt_rule(rt_rule) ||
+ (rt_rule_entry->status)) {
+ IPAWANERR("Routing rule deletion failed!\n");
+ }
+
+ kfree(rt_rule);
+}
+
+int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+ *rule_req, uint32_t *rule_hdl)
+{
+ int i, j;
+
+ if (rule_req->filter_spec_list_valid == true) {
+ num_q6_rule = rule_req->filter_spec_list_len;
+ IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
+ } else {
+ num_q6_rule = 0;
+ IPAWANERR("got no UL rules from modem\n");
+ return -EINVAL;
+ }
+
+ /* copy UL filter rules from Modem*/
+ for (i = 0; i < num_q6_rule; i++) {
+		/* check if rules exceed the cache */
+		if (i == MAX_NUM_Q6_RULE) {
+			IPAWANERR("reached max cache (%d), total rules (%d)\n",
+				MAX_NUM_Q6_RULE, num_q6_rule);
+			goto failure;
+		}
+		/* construct the UL filter rule handle for the QMI use-case */
+ ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl =
+ UL_FILTER_RULE_HANDLE_START + i;
+ rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].ip =
+ rule_req->filter_spec_list[i].ip_type;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].action =
+ rule_req->filter_spec_list[i].filter_action;
+ if (rule_req->filter_spec_list[i].is_routing_table_index_valid
+ == true)
+ ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
+ rule_req->filter_spec_list[i].route_table_index;
+ if (rule_req->filter_spec_list[i].is_mux_id_valid == true)
+ ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id =
+ rule_req->filter_spec_list[i].mux_id;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
+ rule_req->filter_spec_list[i].filter_rule.
+ rule_eq_bitmap;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
+ rule_req->filter_spec_list[i].filter_rule.
+ tos_eq_present;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
+ rule_req->filter_spec_list[i].filter_rule.tos_eq;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ protocol_eq_present = rule_req->filter_spec_list[i].
+ filter_rule.protocol_eq_present;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
+ rule_req->filter_spec_list[i].filter_rule.
+ protocol_eq;
+
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_ihl_offset_range_16 = rule_req->filter_spec_list[i].
+ filter_rule.num_ihl_offset_range_16;
+ for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_ihl_offset_range_16; j++) {
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_range_16[j].offset = rule_req->
+ filter_spec_list[i].filter_rule.
+ ihl_offset_range_16[j].offset;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_range_16[j].range_low = rule_req->
+ filter_spec_list[i].filter_rule.
+ ihl_offset_range_16[j].range_low;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_range_16[j].range_high = rule_req->
+ filter_spec_list[i].filter_rule.
+ ihl_offset_range_16[j].range_high;
+ }
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
+ rule_req->filter_spec_list[i].filter_rule.
+ num_offset_meq_32;
+ for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_offset_meq_32; j++) {
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_32[j].offset = rule_req->filter_spec_list[i].
+ filter_rule.offset_meq_32[j].offset;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_32[j].mask = rule_req->filter_spec_list[i].
+ filter_rule.offset_meq_32[j].mask;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_32[j].value = rule_req->filter_spec_list[i].
+ filter_rule.offset_meq_32[j].value;
+ }
+
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
+ rule_req->filter_spec_list[i].filter_rule.tc_eq_present;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
+ rule_req->filter_spec_list[i].filter_rule.tc_eq;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
+ rule_req->filter_spec_list[i].filter_rule.
+ flow_eq_present;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
+ rule_req->filter_spec_list[i].filter_rule.flow_eq;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_16_present = rule_req->filter_spec_list[i].
+ filter_rule.ihl_offset_eq_16_present;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_16.offset = rule_req->filter_spec_list[i].
+ filter_rule.ihl_offset_eq_16.offset;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_16.value = rule_req->filter_spec_list[i].
+ filter_rule.ihl_offset_eq_16.value;
+
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_32_present = rule_req->filter_spec_list[i].
+ filter_rule.ihl_offset_eq_32_present;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_32.offset = rule_req->filter_spec_list[i].
+ filter_rule.ihl_offset_eq_32.offset;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_32.value = rule_req->filter_spec_list[i].
+ filter_rule.ihl_offset_eq_32.value;
+
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_ihl_offset_meq_32 = rule_req->filter_spec_list[i].
+ filter_rule.num_ihl_offset_meq_32;
+ for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].
+ eq_attrib.num_ihl_offset_meq_32; j++) {
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_meq_32[j].offset = rule_req->
+ filter_spec_list[i].filter_rule.
+ ihl_offset_meq_32[j].offset;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_meq_32[j].mask = rule_req->
+ filter_spec_list[i].filter_rule.
+ ihl_offset_meq_32[j].mask;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_meq_32[j].value = rule_req->
+ filter_spec_list[i].filter_rule.
+ ihl_offset_meq_32[j].value;
+ }
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
+ rule_req->filter_spec_list[i].filter_rule.
+ num_offset_meq_128;
+ for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_offset_meq_128; j++) {
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_128[j].offset = rule_req->
+ filter_spec_list[i].filter_rule.
+ offset_meq_128[j].offset;
+ memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_128[j].mask,
+ rule_req->filter_spec_list[i].
+ filter_rule.offset_meq_128[j].mask, 16);
+ memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_128[j].value, rule_req->
+ filter_spec_list[i].filter_rule.
+ offset_meq_128[j].value, 16);
+ }
+
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ metadata_meq32_present = rule_req->filter_spec_list[i].
+ filter_rule.metadata_meq32_present;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ metadata_meq32.offset = rule_req->filter_spec_list[i].
+ filter_rule.metadata_meq32.offset;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ metadata_meq32.mask = rule_req->filter_spec_list[i].
+ filter_rule.metadata_meq32.mask;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
+ value = rule_req->filter_spec_list[i].filter_rule.
+ metadata_meq32.value;
+ ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ipv4_frag_eq_present = rule_req->filter_spec_list[i].
+ filter_rule.ipv4_frag_eq_present;
+ }
+
+ if (rule_req->xlat_filter_indices_list_valid) {
+ if (rule_req->xlat_filter_indices_list_len > num_q6_rule) {
+ IPAWANERR("Number of xlat indices is not valid: %d\n",
+ rule_req->xlat_filter_indices_list_len);
+ goto failure;
+ }
+		IPAWANDBG("Received %d XLAT indices: ",
+ rule_req->xlat_filter_indices_list_len);
+ for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
+ IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
+ IPAWANDBG("\n");
+
+ for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
+ if (rule_req->xlat_filter_indices_list[i]
+ >= num_q6_rule) {
+ IPAWANERR("Xlat rule idx is wrong: %d\n",
+ rule_req->xlat_filter_indices_list[i]);
+ goto failure;
+ } else {
+ ipa_qmi_ctx->q6_ul_filter_rule
+ [rule_req->xlat_filter_indices_list[i]]
+ .is_xlat_rule = 1;
+ IPAWANDBG("Rule %d is xlat rule\n",
+ rule_req->xlat_filter_indices_list[i]);
+ }
+ }
+ }
+ goto success;
+
+failure:
+ num_q6_rule = 0;
+ memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
+ sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
+ return -EINVAL;
+
+success:
+ return 0;
+}
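+
+/*
+ * Note on handles (illustrative): the filter handles reported back to the
+ * modem are synthesized locally rather than allocated by IPA; rule i maps
+ * to UL_FILTER_RULE_HANDLE_START + i, e.g. rules 0..2 get handles 69..71.
+ */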
+
+static int wwan_add_ul_flt_rule_to_ipa(void)
+{
+ u32 pyld_sz;
+ int i, retval = 0;
+ int num_v4_rule = 0, num_v6_rule = 0;
+ struct ipa_ioc_add_flt_rule *param;
+ struct ipa_flt_rule_add flt_rule_entry;
+ struct ipa_fltr_installed_notif_req_msg_v01 *req;
+
+ if (ipa_qmi_ctx == NULL) {
+ IPAWANERR("ipa_qmi_ctx is NULL!\n");
+ return -EFAULT;
+ }
+
+ pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
+ sizeof(struct ipa_flt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param)
+ return -ENOMEM;
+
+	req = kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+		GFP_KERNEL);
+ if (!req) {
+ kfree(param);
+ return -ENOMEM;
+ }
+
+ param->commit = 1;
+ param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ param->global = false;
+ param->num_rules = (uint8_t)1;
+
+ mutex_lock(&ipa_qmi_lock);
+ for (i = 0; i < num_q6_rule; i++) {
+ param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
+ memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
+ flt_rule_entry.at_rear = true;
+ flt_rule_entry.rule.action =
+ ipa_qmi_ctx->q6_ul_filter_rule[i].action;
+ flt_rule_entry.rule.rt_tbl_idx
+ = ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
+ flt_rule_entry.rule.retain_hdr = true;
+
+ /* debug rt-hdl*/
+ IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
+ i, flt_rule_entry.rule.rt_tbl_idx);
+ flt_rule_entry.rule.eq_attrib_type = true;
+ memcpy(&(flt_rule_entry.rule.eq_attrib),
+ &ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
+ sizeof(struct ipa_ipfltri_rule_eq));
+ memcpy(&(param->rules[0]), &flt_rule_entry,
+ sizeof(struct ipa_flt_rule_add));
+		if (ipa2_add_flt_rule(param)) {
+ retval = -EFAULT;
+ IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
+ } else {
+ /* store the rule handler */
+ ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] =
+ param->rules[0].flt_rule_hdl;
+ }
+ }
+ mutex_unlock(&ipa_qmi_lock);
+
+ /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
+ req->source_pipe_index =
+ ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+ req->install_status = QMI_RESULT_SUCCESS_V01;
+ req->filter_index_list_len = num_q6_rule;
+ mutex_lock(&ipa_qmi_lock);
+ for (i = 0; i < num_q6_rule; i++) {
+ if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) {
+ req->filter_index_list[i].filter_index = num_v4_rule;
+ num_v4_rule++;
+ } else {
+ req->filter_index_list[i].filter_index = num_v6_rule;
+ num_v6_rule++;
+ }
+ req->filter_index_list[i].filter_handle =
+ ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
+ }
+ mutex_unlock(&ipa_qmi_lock);
+ if (qmi_filter_notify_send(req)) {
+ IPAWANDBG("add filter rule index on A7-RX failed\n");
+ retval = -EFAULT;
+ }
+ old_num_q6_rule = num_q6_rule;
+ IPAWANDBG("add (%d) filter rule index on A7-RX\n",
+ old_num_q6_rule);
+ kfree(param);
+ kfree(req);
+ return retval;
+}
+
+static int wwan_del_ul_flt_rule_to_ipa(void)
+{
+ u32 pyld_sz;
+ int i, retval = 0;
+ struct ipa_ioc_del_flt_rule *param;
+ struct ipa_flt_rule_del flt_rule_entry;
+
+ pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
+ sizeof(struct ipa_flt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ IPAWANERR("kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ param->commit = 1;
+ param->num_hdls = (uint8_t) 1;
+
+ for (i = 0; i < old_num_q6_rule; i++) {
+ param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
+ memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
+ flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i];
+ /* debug rt-hdl*/
+ IPAWANDBG("delete-IPA rule index(%d)\n", i);
+ memcpy(&(param->hdl[0]), &flt_rule_entry,
+ sizeof(struct ipa_flt_rule_del));
+		if (ipa2_del_flt_rule(param)) {
+ IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
+ kfree(param);
+ return -EFAULT;
+ }
+ }
+
+ /* set UL filter-rule add-indication */
+ a7_ul_flt_set = false;
+ old_num_q6_rule = 0;
+
+ kfree(param);
+ return retval;
+}
+
+static int find_mux_channel_index(uint32_t mux_id)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+ if (mux_id == mux_channel[i].mux_id)
+ return i;
+ }
+ return MAX_NUM_OF_MUX_CHANNEL;
+}
+
+static int find_vchannel_name_index(const char *vchannel_name)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+		if (strcmp(mux_channel[i].vchannel_name, vchannel_name) == 0)
+ return i;
+ }
+ return MAX_NUM_OF_MUX_CHANNEL;
+}
+
+static int wwan_register_to_ipa(int index)
+{
+ struct ipa_tx_intf tx_properties = {0};
+ struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
+ struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
+ struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
+ struct ipa_rx_intf rx_properties = {0};
+ struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+ struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+ struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+ struct ipa_ext_intf ext_properties = {0};
+ struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
+ u32 pyld_sz;
+ int ret = 0, i;
+
+ IPAWANDBG("index(%d) device[%s]:\n", index,
+ mux_channel[index].vchannel_name);
+ if (!mux_channel[index].mux_hdr_set) {
+ ret = ipa_add_qmap_hdr(mux_channel[index].mux_id,
+ &mux_channel[index].hdr_hdl);
+ if (ret) {
+			IPAWANERR("ipa_add_qmap_hdr failed (%d)\n", index);
+ return ret;
+ }
+ mux_channel[index].mux_hdr_set = true;
+ }
+ tx_properties.prop = tx_ioc_properties;
+ tx_ipv4_property = &tx_properties.prop[0];
+ tx_ipv4_property->ip = IPA_IP_v4;
+ tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+ snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF,
+ mux_channel[index].mux_id);
+ tx_ipv6_property = &tx_properties.prop[1];
+ tx_ipv6_property->ip = IPA_IP_v6;
+ tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	/* no need to use A2_MUX_HDR_NAME_V6_PREF, same header */
+ snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF,
+ mux_channel[index].mux_id);
+ tx_properties.num_props = 2;
+
+ rx_properties.prop = rx_ioc_properties;
+ rx_ipv4_property = &rx_properties.prop[0];
+ rx_ipv4_property->ip = IPA_IP_v4;
+ rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_ipv4_property->attrib.meta_data =
+ mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+ rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+ rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ rx_ipv6_property = &rx_properties.prop[1];
+ rx_ipv6_property->ip = IPA_IP_v6;
+ rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_ipv6_property->attrib.meta_data =
+ mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+ rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+ rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ rx_properties.num_props = 2;
+
+ pyld_sz = num_q6_rule *
+ sizeof(struct ipa_ioc_ext_intf_prop);
+ ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
+ if (!ext_ioc_properties) {
+		IPAWANERR("failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ ext_properties.prop = ext_ioc_properties;
+ ext_properties.excp_pipe_valid = true;
+ ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
+ ext_properties.num_props = num_q6_rule;
+ for (i = 0; i < num_q6_rule; i++) {
+ memcpy(&(ext_properties.prop[i]),
+ &(ipa_qmi_ctx->q6_ul_filter_rule[i]),
+ sizeof(struct ipa_ioc_ext_intf_prop));
+ ext_properties.prop[i].mux_id = mux_channel[index].mux_id;
+ IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
+ ext_properties.prop[i].ip,
+ ext_properties.prop[i].rt_tbl_idx);
+ IPAWANDBG("action: %d mux:%d\n",
+ ext_properties.prop[i].action,
+ ext_properties.prop[i].mux_id);
+ }
+ ret = ipa2_register_intf_ext(mux_channel[index].
+ vchannel_name, &tx_properties,
+ &rx_properties, &ext_properties);
+ if (ret) {
+ IPAWANERR("[%s]:ipa2_register_intf failed %d\n",
+ mux_channel[index].vchannel_name, ret);
+ goto fail;
+ }
+ mux_channel[index].ul_flt_reg = true;
+fail:
+ kfree(ext_ioc_properties);
+ return ret;
+}
+
+static void ipa_cleanup_deregister_intf(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < rmnet_index; i++) {
+ if (mux_channel[i].ul_flt_reg) {
+ ret = ipa2_deregister_intf(
+ mux_channel[i].vchannel_name);
+ if (ret < 0) {
+ IPAWANERR("de-register device %s(%d) failed\n",
+ mux_channel[i].vchannel_name,
+ i);
+ return;
+ }
+ IPAWANDBG("de-register device %s(%d) success\n",
+ mux_channel[i].vchannel_name,
+ i);
+ }
+ mux_channel[i].ul_flt_reg = false;
+ }
+}
+
+int wwan_update_mux_channel_prop(void)
+{
+ int ret = 0, i;
+ /* install UL filter rules */
+ if (egress_set) {
+ if (ipa_qmi_ctx &&
+ ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
+ IPAWANDBG("setup UL filter rules\n");
+ if (a7_ul_flt_set) {
+ IPAWANDBG("del previous UL filter rules\n");
+				/* delete rule handles */
+ ret = wwan_del_ul_flt_rule_to_ipa();
+ if (ret) {
+ IPAWANERR("failed to del old rules\n");
+ return -EINVAL;
+ }
+ IPAWANDBG("deleted old UL rules\n");
+ }
+ ret = wwan_add_ul_flt_rule_to_ipa();
+ }
+ if (ret)
+ IPAWANERR("failed to install UL rules\n");
+ else
+ a7_ul_flt_set = true;
+ }
+ /* update Tx/Rx/Ext property */
+ IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
+ if (rmnet_index == 0) {
+ IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
+ return ret;
+ }
+
+ ipa_cleanup_deregister_intf();
+
+ for (i = 0; i < rmnet_index; i++) {
+ ret = wwan_register_to_ipa(i);
+ if (ret < 0) {
+			IPAWANERR("failed to re-register %s, mux %d, index %d\n",
+ mux_channel[i].vchannel_name,
+ mux_channel[i].mux_id,
+ i);
+ return -ENODEV;
+ }
+ IPAWANERR("dev(%s) has registered to IPA\n",
+ mux_channel[i].vchannel_name);
+ mux_channel[i].ul_flt_reg = true;
+ }
+ return ret;
+}
+
+#ifdef INIT_COMPLETION
+#define reinit_completion(x) INIT_COMPLETION(*(x))
+#endif /* INIT_COMPLETION */
+
+static int __ipa_wwan_open(struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ IPAWANDBG("[%s] __wwan_open()\n", dev->name);
+ if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
+ reinit_completion(&wwan_ptr->resource_granted_completion);
+ wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+
+ if (ipa_rmnet_res.ipa_napi_enable)
+ napi_enable(&(wwan_ptr->napi));
+ return 0;
+}
+
+/**
+ * ipa_wwan_open() - Opens the wwan network interface. Opens logical
+ * channel on A2 MUX driver and starts the network stack queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while opening logical channel on A2 MUX driver
+ */
+static int ipa_wwan_open(struct net_device *dev)
+{
+ int rc = 0;
+
+ IPAWANDBG("[%s] wwan_open()\n", dev->name);
+ rc = __ipa_wwan_open(dev);
+ if (rc == 0)
+ netif_start_queue(dev);
+ return rc;
+}
+
+static int __ipa_wwan_close(struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ int rc = 0;
+
+ if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
+ wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
+ /* do not close wwan port once up, this causes
+	 * remote side to hang if it is opened again
+ */
+ reinit_completion(&wwan_ptr->resource_granted_completion);
+ if (ipa_rmnet_res.ipa_napi_enable)
+ napi_disable(&(wwan_ptr->napi));
+ rc = ipa2_deregister_intf(dev->name);
+ if (rc) {
+ IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
+ dev->name, rc);
+ return rc;
+ }
+ return rc;
+ } else {
+ return -EBADF;
+ }
+}
+
+/**
+ * ipa_wwan_stop() - Stops the wwan network interface. Closes
+ * logical channel on A2 MUX driver and stops the network stack
+ * queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while closing logical channel on A2 MUX driver
+ */
+static int ipa_wwan_stop(struct net_device *dev)
+{
+ IPAWANDBG("[%s] ipa_wwan_stop()\n", dev->name);
+ __ipa_wwan_close(dev);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > WWAN_DATA_LEN)
+ return -EINVAL;
+ IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
+ dev->name, dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/**
+ * ipa_wwan_xmit() - Transmits an skb.
+ *
+ * @skb: skb to be transmitted
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
+ * later
+ * -EFAULT: Error while transmitting the skb
+ */
+static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int ret = 0;
+ bool qmap_check;
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ struct ipa_tx_meta meta;
+
+ if (skb->protocol != htons(ETH_P_MAP)) {
+ IPAWANDBG
+		("SW filtering out non-QMAP packet received from %s\n",
+ current->comm);
+ return NETDEV_TX_OK;
+ }
+
+ qmap_check = RMNET_MAP_GET_CD_BIT(skb);
+ if (netif_queue_stopped(dev)) {
+ if (qmap_check &&
+ atomic_read(&wwan_ptr->outstanding_pkts) <
+ wwan_ptr->outstanding_high_ctl) {
+ pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
+ goto send;
+ } else {
+ pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ /* checking High WM hit */
+ if (atomic_read(&wwan_ptr->outstanding_pkts) >=
+ wwan_ptr->outstanding_high) {
+ if (!qmap_check) {
+ IPAWANDBG("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n",
+ atomic_read(&wwan_ptr->outstanding_pkts),
+ wwan_ptr->outstanding_high,
+ netif_queue_stopped(dev),
+ qmap_check);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+send:
+ /* IPA_RM checking start */
+ ret = ipa_rm_inactivity_timer_request_resource(
+ IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret == -EINPROGRESS) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+ if (ret) {
+ pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
+ dev->name, ret);
+ return -EFAULT;
+ }
+ /* IPA_RM checking end */
+
+ if (qmap_check) {
+ memset(&meta, 0, sizeof(meta));
+ meta.pkt_init_dst_ep_valid = true;
+ meta.pkt_init_dst_ep_remote = true;
+ ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
+ } else {
+ ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
+ }
+
+ if (ret) {
+ ret = NETDEV_TX_BUSY;
+ dev->stats.tx_dropped++;
+ goto out;
+ }
+
+ atomic_inc(&wwan_ptr->outstanding_pkts);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ ret = NETDEV_TX_OK;
+out:
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_WWAN_0_PROD);
+ return ret;
+}
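+
+/*
+ * UL resource-management sketch: ipa_rm_inactivity_timer_request_resource()
+ * returning -EINPROGRESS stops the queue; the later IPA_RM_RESOURCE_GRANTED
+ * event (see ipa_rm_notify() below) schedules wake_tx_queue() to restart
+ * it. Releases follow on the transmit path and again on TX-complete, so
+ * the inactivity timer can eventually let go of the resource.
+ */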
+
+static void ipa_wwan_tx_timeout(struct net_device *dev)
+{
+ IPAWANERR("[%s] ipa_wwan_tx_timeout(), data stall in UL\n", dev->name);
+}
+
+/**
+ * apps_ipa_tx_complete_notify() - Rx notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Check that the packet is the one we sent and release it
+ * This function will be called in deferred context in IPA wq.
+ */
+static void apps_ipa_tx_complete_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+ struct net_device *dev = (struct net_device *)priv;
+ struct wwan_private *wwan_ptr;
+
+ if (dev != ipa_netdevs[0]) {
+ IPAWANDBG("Received pre-SSR packet completion\n");
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (evt != IPA_WRITE_DONE) {
+ IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return;
+ }
+
+ wwan_ptr = netdev_priv(dev);
+ atomic_dec(&wwan_ptr->outstanding_pkts);
+ __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
+ if (!atomic_read(&is_ssr) &&
+ netif_queue_stopped(wwan_ptr->net) &&
+ atomic_read(&wwan_ptr->outstanding_pkts) <
+ (wwan_ptr->outstanding_low)) {
+ IPAWANDBG("Outstanding low (%d) - wake up queue\n",
+ wwan_ptr->outstanding_low);
+ netif_wake_queue(wwan_ptr->net);
+ }
+ __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
+ dev_kfree_skb_any(skb);
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_WWAN_0_PROD);
+}
+
+/**
+ * apps_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA will pass a packet to the Linux network stack with skb->data
+ */
+static void apps_ipa_packet_receive_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)priv;
+
+ if (evt == IPA_RECEIVE) {
+ struct sk_buff *skb = (struct sk_buff *)data;
+ int result;
+ unsigned int packet_len = skb->len;
+
+		IPAWANDBG("Rx packet was received\n");
+ skb->dev = ipa_netdevs[0];
+ skb->protocol = htons(ETH_P_MAP);
+
+ if (ipa_rmnet_res.ipa_napi_enable) {
+ trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
+ result = netif_receive_skb(skb);
+ } else {
+ if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
+ == 0) {
+ trace_rmnet_ipa_netifni(dev->stats.rx_packets);
+ result = netif_rx_ni(skb);
+ } else {
+ trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
+ result = netif_rx(skb);
+ }
+ }
+
+ if (result) {
+ pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
+ __func__, __LINE__);
+ dev->stats.rx_dropped++;
+ }
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_len;
+ } else if (evt == IPA_CLIENT_START_POLL)
+ ipa_rmnet_rx_cb(priv);
+ else if (evt == IPA_CLIENT_COMP_NAPI) {
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ if (ipa_rmnet_res.ipa_napi_enable)
+ napi_complete(&(wwan_ptr->napi));
+ } else
+ IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
+
+}
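+
+/*
+ * Rx delivery summary (sketch): with NAPI enabled the skb is handed to
+ * netif_receive_skb() from poll context; otherwise every
+ * IPA_WWAN_RX_SOFTIRQ_THRESH-th packet goes through netif_rx_ni() to give
+ * softirqs a chance to run, and the rest use netif_rx().
+ */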
+
+/**
+ * ipa_wwan_ioctl() - I/O control for wwan network driver.
+ *
+ * @dev: network device
+ * @ifr: ignored
+ * @cmd: cmd to be executed. can be one of the following:
+ * IPA_WWAN_IOCTL_OPEN - Open the network interface
+ * IPA_WWAN_IOCTL_CLOSE - Close the network interface
+ *
+ * Return codes:
+ * 0: success
+ * -EFAULT: error while copying data to/from user space
+ * -EINVAL: unsupported command
+ */
+static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int rc = 0;
+ int mru = 1000, epid = 1, mux_index, len;
+ struct ipa_msg_meta msg_meta;
+ struct ipa_wan_msg *wan_msg = NULL;
+ struct rmnet_ioctl_extended_s extend_ioctl_data;
+ struct rmnet_ioctl_data_s ioctl_data;
+
+	IPAWANDBG("rmnet_ipa got ioctl number 0x%08x\n", cmd);
+ switch (cmd) {
+ /* Set Ethernet protocol */
+ case RMNET_IOCTL_SET_LLP_ETHERNET:
+ break;
+ /* Set RAWIP protocol */
+ case RMNET_IOCTL_SET_LLP_IP:
+ break;
+ /* Get link protocol */
+ case RMNET_IOCTL_GET_LLP:
+ ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+ /* Set QoS header enabled */
+ case RMNET_IOCTL_SET_QOS_ENABLE:
+ return -EINVAL;
+ /* Set QoS header disabled */
+ case RMNET_IOCTL_SET_QOS_DISABLE:
+ break;
+ /* Get QoS header state */
+ case RMNET_IOCTL_GET_QOS:
+ ioctl_data.u.operation_mode = RMNET_MODE_NONE;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+ /* Get operation mode */
+ case RMNET_IOCTL_GET_OPMODE:
+ ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+ /* Open transport port */
+ case RMNET_IOCTL_OPEN:
+ break;
+ /* Close transport port */
+ case RMNET_IOCTL_CLOSE:
+ break;
+ /* Flow enable */
+ case RMNET_IOCTL_FLOW_ENABLE:
+ IPAWANDBG("Received flow enable\n");
+ if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_data_s))) {
+ rc = -EFAULT;
+ break;
+ }
+ ipa_flow_control(IPA_CLIENT_USB_PROD, true,
+ ioctl_data.u.tcm_handle);
+ break;
+ /* Flow disable */
+ case RMNET_IOCTL_FLOW_DISABLE:
+ IPAWANDBG("Received flow disable\n");
+ if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_data_s))) {
+ rc = -EFAULT;
+ break;
+ }
+ ipa_flow_control(IPA_CLIENT_USB_PROD, false,
+ ioctl_data.u.tcm_handle);
+ break;
+ /* Set flow handle */
+ case RMNET_IOCTL_FLOW_SET_HNDL:
+ break;
+
+ /* Extended IOCTLs */
+ case RMNET_IOCTL_EXTENDED:
+ IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
+ if (copy_from_user(&extend_ioctl_data,
+ (u8 *)ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_extended_s))) {
+ IPAWANERR("failed to copy extended ioctl data\n");
+ rc = -EFAULT;
+ break;
+ }
+ switch (extend_ioctl_data.extended_ioctl) {
+ /* Get features */
+ case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+ IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
+ extend_ioctl_data.u.data =
+ (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
+ RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
+ RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
+ /* Set MRU */
+ case RMNET_IOCTL_SET_MRU:
+ mru = extend_ioctl_data.u.data;
+ IPAWANDBG("get MRU size %d\n",
+ extend_ioctl_data.u.data);
+ break;
+ /* Get MRU */
+ case RMNET_IOCTL_GET_MRU:
+ extend_ioctl_data.u.data = mru;
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
+ /* GET SG support */
+ case RMNET_IOCTL_GET_SG_SUPPORT:
+ extend_ioctl_data.u.data =
+ ipa_rmnet_res.ipa_advertise_sg_support;
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
+ /* Get endpoint ID */
+ case RMNET_IOCTL_GET_EPID:
+ IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
+ extend_ioctl_data.u.data = epid;
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ if (copy_from_user(&extend_ioctl_data,
+ (u8 *)ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_extended_s))) {
+ IPAWANERR("copy extended ioctl data failed\n");
+ rc = -EFAULT;
+ break;
+ }
+ IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
+ extend_ioctl_data.u.data);
+ break;
+ /* Endpoint pair */
+ case RMNET_IOCTL_GET_EP_PAIR:
+ IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
+ extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
+ ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+ extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
+ ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ if (copy_from_user(&extend_ioctl_data,
+ (u8 *)ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_extended_s))) {
+ IPAWANERR("copy extended ioctl data failed\n");
+ rc = -EFAULT;
+ break;
+ }
+ IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
+ extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
+ extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
+ break;
+ /* Get driver name */
+ case RMNET_IOCTL_GET_DRIVER_NAME:
+			memcpy(&extend_ioctl_data.u.if_name,
+				ipa_netdevs[0]->name,
+				IFNAMSIZ);
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
+ /* Add MUX ID */
+ case RMNET_IOCTL_ADD_MUX_CHANNEL:
+ mux_index = find_mux_channel_index(
+ extend_ioctl_data.u.rmnet_mux_val.mux_id);
+ if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANDBG("already setup mux(%d)\n",
+ extend_ioctl_data.u.
+ rmnet_mux_val.mux_id);
+ return rc;
+ }
+ if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("Exceed mux_channel limit(%d)\n",
+ rmnet_index);
+ return -EFAULT;
+ }
+ IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
+ extend_ioctl_data.u.rmnet_mux_val.mux_id,
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
+ /* cache the mux name and id */
+ mux_channel[rmnet_index].mux_id =
+ extend_ioctl_data.u.rmnet_mux_val.mux_id;
+ memcpy(mux_channel[rmnet_index].vchannel_name,
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
+ sizeof(mux_channel[rmnet_index].vchannel_name));
+			IPAWANDBG("cache device[%s:%d] in IPA_wan[%d]\n",
+ mux_channel[rmnet_index].vchannel_name,
+ mux_channel[rmnet_index].mux_id,
+ rmnet_index);
+			/* check if UL filter rules have arrived */
+ if (num_q6_rule != 0) {
+ IPAWANERR("dev(%s) register to IPA\n",
+ extend_ioctl_data.u.rmnet_mux_val.
+ vchannel_name);
+ rc = wwan_register_to_ipa(rmnet_index);
+ if (rc < 0) {
+ IPAWANERR("device %s reg IPA failed\n",
+ extend_ioctl_data.u.
+ rmnet_mux_val.vchannel_name);
+ return -ENODEV;
+ }
+ mux_channel[rmnet_index].mux_channel_set = true;
+ mux_channel[rmnet_index].ul_flt_reg = true;
+ } else {
+ IPAWANDBG("dev(%s) haven't registered to IPA\n",
+ extend_ioctl_data.u.
+ rmnet_mux_val.vchannel_name);
+ mux_channel[rmnet_index].mux_channel_set = true;
+ mux_channel[rmnet_index].ul_flt_reg = false;
+ }
+ rmnet_index++;
+ break;
+ case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
+ IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
+ cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_UL;
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
+ cs_metadata_hdr_offset = 1;
+ } else {
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
+ }
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
+ IPA_ENABLE_AGGR;
+ else
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
+ IPA_BYPASS_AGGR;
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_metadata_valid = 1;
+			/* modem wants offset at 0! */
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst =
+ IPA_CLIENT_APPS_LAN_WAN_PROD;
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC;
+
+ apps_to_ipa_ep_cfg.client =
+ IPA_CLIENT_APPS_LAN_WAN_PROD;
+ apps_to_ipa_ep_cfg.notify =
+ apps_ipa_tx_complete_notify;
+ apps_to_ipa_ep_cfg.desc_fifo_sz =
+ IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+ apps_to_ipa_ep_cfg.priv = dev;
+
+ rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg,
+ &apps_to_ipa_hdl);
+ if (rc)
+ IPAWANERR("failed to config egress endpoint\n");
+
+ if (num_q6_rule != 0) {
+ /* already got Q6 UL filter rules*/
+ if (ipa_qmi_ctx &&
+ ipa_qmi_ctx->modem_cfg_emb_pipe_flt
+ == false)
+ rc = wwan_add_ul_flt_rule_to_ipa();
+ else
+ rc = 0;
+ egress_set = true;
+ if (rc)
+ IPAWANERR("install UL rules failed\n");
+ else
+ a7_ul_flt_set = true;
+ } else {
+				/* wait for Q6 UL filter rules */
+ egress_set = true;
+ IPAWANDBG("no UL-rules, egress_set(%d)\n",
+ egress_set);
+ }
+ break;
+ case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
+ IPAWANDBG("get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.
+ cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_DL;
+
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
+ IPAWANERR("get AGG size %d count %d\n",
+ extend_ioctl_data.u.
+ ingress_format.agg_size,
+ extend_ioctl_data.u.
+ ingress_format.agg_count);
+ if (!ipa_disable_apps_wan_cons_deaggr(
+ extend_ioctl_data.u.
+ ingress_format.agg_size,
+ extend_ioctl_data.
+ u.ingress_format.agg_count)) {
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.
+ aggr_byte_limit = extend_ioctl_data.
+ u.ingress_format.agg_size;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.
+ aggr_pkt_limit = extend_ioctl_data.
+ u.ingress_format.agg_count;
+ }
+ }
+
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_metadata_valid = 1;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.
+ hdr.hdr_ofst_metadata = 1;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_pkt_size_valid = 1;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_pkt_size = 2;
+
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_total_len_or_pad_valid = true;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_total_len_or_pad = 0;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_payload_len_inc_padding = true;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_total_len_or_pad_offset = 0;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_little_endian = 0;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.
+ metadata_mask = 0xFF000000;
+
+ ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
+ ipa_to_apps_ep_cfg.notify =
+ apps_ipa_packet_receive_notify;
+ ipa_to_apps_ep_cfg.priv = dev;
+
+ ipa_to_apps_ep_cfg.napi_enabled =
+ ipa_rmnet_res.ipa_napi_enable;
+ if (ipa_to_apps_ep_cfg.napi_enabled)
+ ipa_to_apps_ep_cfg.desc_fifo_sz =
+ IPA_WAN_CONS_DESC_FIFO_SZ;
+ else
+ ipa_to_apps_ep_cfg.desc_fifo_sz =
+ IPA_SYS_DESC_FIFO_SZ;
+
+ mutex_lock(&ipa_to_apps_pipe_handle_guard);
+ if (atomic_read(&is_ssr)) {
+ IPAWANDBG("In SSR sequence/recovery\n");
+ mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+ rc = -EFAULT;
+ break;
+ }
+ rc = ipa2_setup_sys_pipe(
+ &ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
+ mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+ if (rc)
+ IPAWANERR("failed to configure ingress\n");
+ break;
+ case RMNET_IOCTL_SET_XLAT_DEV_INFO:
+ wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
+ GFP_KERNEL);
+ if (!wan_msg) {
+ IPAWANERR("Failed to allocate memory.\n");
+ return -ENOMEM;
+ }
+ len = sizeof(wan_msg->upstream_ifname) >
+ sizeof(extend_ioctl_data.u.if_name) ?
+ sizeof(extend_ioctl_data.u.if_name) :
+ sizeof(wan_msg->upstream_ifname);
+ strlcpy(wan_msg->upstream_ifname,
+ extend_ioctl_data.u.if_name, len);
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = WAN_XLAT_CONNECT;
+ msg_meta.msg_len = sizeof(struct ipa_wan_msg);
+ rc = ipa2_send_msg(&msg_meta, wan_msg,
+ ipa_wwan_msg_free_cb);
+ if (rc) {
+ IPAWANERR("Failed to send XLAT_CONNECT msg\n");
+ kfree(wan_msg);
+ }
+ break;
+ /* Get agg count */
+ case RMNET_IOCTL_GET_AGGREGATION_COUNT:
+ break;
+ /* Set agg count */
+ case RMNET_IOCTL_SET_AGGREGATION_COUNT:
+ break;
+ /* Get agg size */
+ case RMNET_IOCTL_GET_AGGREGATION_SIZE:
+ break;
+ /* Set agg size */
+ case RMNET_IOCTL_SET_AGGREGATION_SIZE:
+ break;
+ /* Do flow control */
+ case RMNET_IOCTL_FLOW_CONTROL:
+ break;
+ /* For legacy use */
+ case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
+ break;
+ /* Get HW/SW map */
+ case RMNET_IOCTL_GET_HWSW_MAP:
+ break;
+ /* Set RX Headroom */
+ case RMNET_IOCTL_SET_RX_HEADROOM:
+ break;
+ default:
+ IPAWANERR("[%s] unsupported extended cmd[%d]",
+ dev->name,
+ extend_ioctl_data.extended_ioctl);
+ rc = -EINVAL;
+ }
+ break;
+ default:
+ IPAWANERR("[%s] unsupported cmd[%d]",
+ dev->name, cmd);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static const struct net_device_ops ipa_wwan_ops_ip = {
+ .ndo_open = ipa_wwan_open,
+ .ndo_stop = ipa_wwan_stop,
+ .ndo_start_xmit = ipa_wwan_xmit,
+ .ndo_tx_timeout = ipa_wwan_tx_timeout,
+ .ndo_do_ioctl = ipa_wwan_ioctl,
+ .ndo_change_mtu = ipa_wwan_change_mtu,
+ .ndo_set_mac_address = 0,
+ .ndo_validate_addr = 0,
+};
+
+/**
+ * ipa_wwan_setup() - Sets up the wwan network driver.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_wwan_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &ipa_wwan_ops_ip;
+ ether_setup(dev);
+ /* set this after calling ether_setup */
+ dev->header_ops = 0; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->mtu = WWAN_DATA_LEN;
+ dev->addr_len = 0;
+ dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ dev->needed_headroom = HEADROOM_FOR_QMAP;
+ dev->needed_tailroom = TAILROOM;
+ dev->watchdog_timeo = 1000;
+}
+
+/* IPA_RM related functions start*/
+static void q6_prod_rm_request_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource);
+static void q6_prod_rm_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource);
+
+static void q6_prod_rm_request_resource(struct work_struct *work)
+{
+ int ret = 0;
+
+ ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
+ if (ret < 0 && ret != -EINPROGRESS) {
+ IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
+ ret);
+ return;
+ }
+}
+
+static int q6_rm_request_resource(void)
+{
+ queue_delayed_work(ipa_rm_q6_workqueue,
+ &q6_con_rm_request, 0);
+ return 0;
+}
+
+static void q6_prod_rm_release_resource(struct work_struct *work)
+{
+ int ret = 0;
+
+ ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
+ if (ret < 0 && ret != -EINPROGRESS) {
+ IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
+ ret);
+ return;
+ }
+}
+
+
+static int q6_rm_release_resource(void)
+{
+ queue_delayed_work(ipa_rm_q6_workqueue,
+ &q6_con_rm_release, 0);
+ return 0;
+}
+
+
+static void q6_rm_notify_cb(void *user_data,
+ enum ipa_rm_event event,
+ unsigned long data)
+{
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ IPAWANDBG("%s: Q6_PROD GRANTED CB\n", __func__);
+ break;
+ case IPA_RM_RESOURCE_RELEASED:
+ IPAWANDBG("%s: Q6_PROD RELEASED CB\n", __func__);
+ break;
+ default:
+ return;
+ }
+}
+static int q6_initialize_rm(void)
+{
+ struct ipa_rm_create_params create_params;
+ struct ipa_rm_perf_profile profile;
+ int result;
+
+ /* Initialize IPA_RM workqueue */
+ ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req");
+ if (!ipa_rm_q6_workqueue)
+ return -ENOMEM;
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_Q6_PROD;
+ create_params.reg_params.notify_cb = &q6_rm_notify_cb;
+ result = ipa_rm_create_resource(&create_params);
+ if (result)
+ goto create_rsrc_err1;
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_Q6_CONS;
+ create_params.release_resource = &q6_rm_release_resource;
+ create_params.request_resource = &q6_rm_request_resource;
+ result = ipa_rm_create_resource(&create_params);
+ if (result)
+ goto create_rsrc_err2;
+ /* add dependency*/
+ result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_APPS_CONS);
+ if (result)
+ goto add_dpnd_err;
+ /* setup Performance profile */
+ memset(&profile, 0, sizeof(profile));
+ profile.max_supported_bandwidth_mbps = 100;
+ result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+ &profile);
+ if (result)
+ goto set_perf_err;
+ result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
+ &profile);
+ if (result)
+ goto set_perf_err;
+ return result;
+
+set_perf_err:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_APPS_CONS);
+add_dpnd_err:
+ result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+ if (result < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_CONS, result);
+create_rsrc_err2:
+ result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+ if (result < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_PROD, result);
+create_rsrc_err1:
+ destroy_workqueue(ipa_rm_q6_workqueue);
+ return result;
+}
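+
+/*
+ * Resulting IPA RM dependency graph (sketch; the WWAN_0_PROD -> Q6_CONS
+ * edge is added later in ipa_wwan_probe()):
+ *
+ *	WWAN_0_PROD ---> Q6_CONS
+ *	Q6_PROD -------> APPS_CONS
+ */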
+
+void q6_deinitialize_rm(void)
+{
+ int ret;
+
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_APPS_CONS);
+ if (ret < 0)
+ IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
+ ret);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+ if (ret < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_CONS, ret);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+ if (ret < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_PROD, ret);
+ destroy_workqueue(ipa_rm_q6_workqueue);
+}
+
+static void wake_tx_queue(struct work_struct *work)
+{
+ if (ipa_netdevs[0]) {
+ __netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
+ netif_wake_queue(ipa_netdevs[0]);
+ __netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
+ }
+}
+
+/**
+ * ipa_rm_resource_granted() - Called upon
+ * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_resource_granted(void *dev)
+{
+ IPAWANDBG("Resource Granted - starting queue\n");
+ schedule_work(&ipa_tx_wakequeue_work);
+}
+
+/**
+ * ipa_rm_notify() - Callback function for RM events. Handles
+ * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
+ * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
+ * workqueue.
+ *
+ * @dev: network device
+ * @event: IPA RM event
+ * @data: Additional data provided by IPA RM
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
+ unsigned long data)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ pr_debug("%s: event %d\n", __func__, event);
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
+ complete_all(&wwan_ptr->resource_granted_completion);
+ break;
+ }
+ ipa_rm_resource_granted(dev);
+ break;
+ case IPA_RM_RESOURCE_RELEASED:
+ break;
+ default:
+ pr_err("%s: unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+/* IPA_RM related functions end*/
+
+static int ssr_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data);
+
+static struct notifier_block ssr_notifier = {
+ .notifier_call = ssr_notifier_cb,
+};
+
+static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
+ struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
+{
+ ipa_rmnet_drv_res->ipa_rmnet_ssr =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,rmnet-ipa-ssr");
+ pr_info("IPA SSR support = %s\n",
+ ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
+ ipa_rmnet_drv_res->ipa_loaduC =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-loaduC");
+ pr_info("IPA ipa-loaduC = %s\n",
+ ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
+
+ ipa_rmnet_drv_res->ipa_advertise_sg_support =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-advertise-sg-support");
+ pr_info("IPA SG support = %s\n",
+ ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
+
+ ipa_rmnet_drv_res->ipa_napi_enable =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-napi-enable");
+ pr_info("IPA Napi Enable = %s\n",
+ ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+ return 0;
+}
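+
+/*
+ * Hypothetical devicetree fragment consumed above (node and compatible
+ * names are illustrative; the boolean properties are the ones read here):
+ *
+ *	rmnet-ipa {
+ *		compatible = "qcom,rmnet-ipa";
+ *		qcom,rmnet-ipa-ssr;
+ *		qcom,ipa-loaduC;
+ *		qcom,ipa-advertise-sg-support;
+ *		qcom,ipa-napi-enable;
+ *	};
+ */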
+
+struct ipa_rmnet_context ipa_rmnet_ctx;
+
+/**
+ * ipa_wwan_probe() - Initializes the module and registers as a
+ * network interface to the network stack
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: No memory available
+ * -EFAULT: Internal error
+ * -ENODEV: IPA driver not loaded
+ */
+static int ipa_wwan_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ struct net_device *dev;
+ struct wwan_private *wwan_ptr;
+ struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */
+ struct ipa_rm_perf_profile profile; /* IPA_RM */
+
+ pr_info("rmnet_ipa started initialization\n");
+
+ if (!ipa2_is_ready()) {
+ IPAWANERR("IPA driver not loaded\n");
+ return -ENODEV;
+ }
+
+ ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res);
+ ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr;
+
+ ret = ipa_init_q6_smem();
+ if (ret) {
+ IPAWANERR("ipa_init_q6_smem failed!\n");
+ return ret;
+ }
+
+	/* initialize tx/rx endpoint setup */
+ memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
+ memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
+
+	/* initialize ext property setup */
+ num_q6_rule = 0;
+ old_num_q6_rule = 0;
+ rmnet_index = 0;
+ egress_set = false;
+ a7_ul_flt_set = false;
+ for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
+ memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val));
+
+ /* start A7 QMI service/client */
+ if (ipa_rmnet_res.ipa_loaduC)
+ /* Android platform loads uC */
+ ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
+ else
+		/* LE platform does not load uC */
+ ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
+
+ /* construct default WAN RT tbl for IPACM */
+ ret = ipa_setup_a7_qmap_hdr();
+ if (ret)
+ goto setup_a7_qmap_hdr_err;
+ ret = ipa_setup_dflt_wan_rt_tables();
+ if (ret)
+ goto setup_dflt_wan_rt_tables_err;
+
+ if (!atomic_read(&is_ssr)) {
+ /* Start transport-driver fd ioctl for ipacm for first init */
+ ret = wan_ioctl_init();
+ if (ret)
+ goto wan_ioctl_init_err;
+ } else {
+ /* Enable sending QMI messages after SSR */
+ wan_ioctl_enable_qmi_messages();
+ }
+
+ /* initialize wan-driver netdev */
+ dev = alloc_netdev(sizeof(struct wwan_private),
+ IPA_WWAN_DEV_NAME,
+ NET_NAME_UNKNOWN,
+ ipa_wwan_setup);
+ if (!dev) {
+ IPAWANERR("no memory for netdev\n");
+ ret = -ENOMEM;
+ goto alloc_netdev_err;
+ }
+ ipa_netdevs[0] = dev;
+ wwan_ptr = netdev_priv(dev);
+ memset(wwan_ptr, 0, sizeof(*wwan_ptr));
+	IPAWANDBG("wwan_ptr (private) = %p\n", wwan_ptr);
+ wwan_ptr->net = dev;
+ wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL;
+ wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+ wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+ atomic_set(&wwan_ptr->outstanding_pkts, 0);
+ spin_lock_init(&wwan_ptr->lock);
+ init_completion(&wwan_ptr->resource_granted_completion);
+
+ if (!atomic_read(&is_ssr)) {
+ /* IPA_RM configuration starts */
+ ret = q6_initialize_rm();
+ if (ret) {
+ IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n",
+ __func__, ret);
+ goto q6_init_err;
+ }
+ }
+
+ memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
+ ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
+ ipa_rm_params.reg_params.user_data = dev;
+ ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
+ ret = ipa_rm_create_resource(&ipa_rm_params);
+ if (ret) {
+		pr_err("%s: unable to create resource %d in IPA RM\n",
+ __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
+ goto create_rsrc_err;
+ }
+ ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_INACTIVITY_TIMER);
+ if (ret) {
+		pr_err("%s: ipa rm timer init failed %d on resource %d\n",
+ __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
+ goto timer_init_err;
+ }
+ /* add dependency */
+ ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (ret)
+ goto add_dpnd_err;
+ /* setup Performance profile */
+ memset(&profile, 0, sizeof(profile));
+ profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+ ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
+ &profile);
+ if (ret)
+ goto set_perf_err;
+ /* IPA_RM configuration ends */
+
+ /* Enable SG support in netdevice. */
+ if (ipa_rmnet_res.ipa_advertise_sg_support)
+ dev->hw_features |= NETIF_F_SG;
+
+ /* Enable NAPI support in netdevice. */
+ if (ipa_rmnet_res.ipa_napi_enable) {
+ netif_napi_add(dev, &(wwan_ptr->napi),
+ ipa_rmnet_poll, NAPI_WEIGHT);
+ }
+
+ ret = register_netdev(dev);
+ if (ret) {
+ IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
+ 0, ret);
+ goto set_perf_err;
+ }
+
+ IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n",
+ ipa_netdevs[0]->name);
+ if (ret) {
+ IPAWANERR("default configuration failed rc=%d\n",
+ ret);
+ goto config_err;
+ }
+ atomic_set(&is_initialized, 1);
+ if (!atomic_read(&is_ssr)) {
+ /* offline charging mode */
+ ipa2_proxy_clk_unvote();
+ }
+ atomic_set(&is_ssr, 0);
+
+ pr_info("rmnet_ipa completed initialization\n");
+ return 0;
+config_err:
+ if (ipa_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(wwan_ptr->napi));
+ unregister_netdev(ipa_netdevs[0]);
+set_perf_err:
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (ret)
+ IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+ ret);
+add_dpnd_err:
+ ret = ipa_rm_inactivity_timer_destroy(
+ IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
+ if (ret)
+ IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+timer_init_err:
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+create_rsrc_err:
+ q6_deinitialize_rm();
+q6_init_err:
+ free_netdev(ipa_netdevs[0]);
+ ipa_netdevs[0] = NULL;
+alloc_netdev_err:
+ wan_ioctl_deinit();
+wan_ioctl_init_err:
+ ipa_del_dflt_wan_rt_tables();
+setup_dflt_wan_rt_tables_err:
+ ipa_del_a7_qmap_hdr();
+setup_a7_qmap_hdr_err:
+ ipa_qmi_service_exit();
+ atomic_set(&is_ssr, 0);
+ return ret;
+}
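+/*
+ * Note the error-unwind order above: the goto labels run in exact reverse
+ * order of acquisition (perf profile -> RM dependency -> inactivity timer ->
+ * RM resource -> Q6 RM -> netdev -> wan ioctl -> default WAN RT tables ->
+ * A7 QMAP header -> QMI service), so a failure at step N releases only what
+ * steps 1..N-1 set up. A minimal sketch of the idiom, with hypothetical
+ * acquire_a()/acquire_b()/release_a() helpers:
+ *
+ *	ret = acquire_a();
+ *	if (ret)
+ *		return ret;
+ *	ret = acquire_b();
+ *	if (ret)
+ *		goto release_a_label;
+ *	return 0;
+ * release_a_label:
+ *	release_a();
+ *	return ret;
+ */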
+
+static int ipa_wwan_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct wwan_private *wwan_ptr;
+
+ wwan_ptr = netdev_priv(ipa_netdevs[0]);
+
+ pr_info("rmnet_ipa started deinitialization\n");
+ mutex_lock(&ipa_to_apps_pipe_handle_guard);
+ ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl);
+ if (ret < 0)
+ IPAWANERR("Failed to teardown IPA->APPS pipe\n");
+ else
+ ipa_to_apps_hdl = -1;
+ if (ipa_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(wwan_ptr->napi));
+ mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+ unregister_netdev(ipa_netdevs[0]);
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (ret < 0)
+ IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+ ret);
+ ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret < 0)
+ IPAWANERR(
+ "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+ cancel_work_sync(&ipa_tx_wakequeue_work);
+ cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+ free_netdev(ipa_netdevs[0]);
+ ipa_netdevs[0] = NULL;
+ /* No need to remove wwan_ioctl during SSR */
+ if (!atomic_read(&is_ssr))
+ wan_ioctl_deinit();
+ ipa_del_dflt_wan_rt_tables();
+ ipa_del_a7_qmap_hdr();
+ ipa_del_mux_qmap_hdrs();
+	if (ipa_qmi_ctx && !ipa_qmi_ctx->modem_cfg_emb_pipe_flt)
+ wwan_del_ul_flt_rule_to_ipa();
+ ipa_cleanup_deregister_intf();
+ atomic_set(&is_initialized, 0);
+ pr_info("rmnet_ipa completed deinitialization\n");
+ return 0;
+}
+
+/**
+* rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
+* @dev: pointer to device
+*
+* This callback will be invoked by the runtime_pm framework when an AP suspend
+* operation is invoked, usually by pressing a suspend button.
+*
+* Returns -EAGAIN to the runtime_pm framework in case there are pending
+* packets in the Tx queue. This postpones the suspend operation until all
+* pending packets have been transmitted.
+*
+* In case there are no packets to send, releases the WWAN0_PROD entity.
+* As an outcome, the number of IPA active clients should be decremented
+* until IPA clocks can be gated.
+*/
+static int rmnet_ipa_ap_suspend(struct device *dev)
+{
+ struct net_device *netdev = ipa_netdevs[0];
+ struct wwan_private *wwan_ptr = netdev_priv(netdev);
+
+ IPAWANDBG("Enter...\n");
+	/* Do not allow A7 to suspend in case there are outstanding packets */
+ if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
+ IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
+ return -EAGAIN;
+ }
+
+ /* Make sure that there is no Tx operation ongoing */
+ netif_tx_lock_bh(netdev);
+ ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ netif_tx_unlock_bh(netdev);
+ IPAWANDBG("Exit\n");
+
+ return 0;
+}
+
+/**
+* rmnet_ipa_ap_resume() - resume callback for runtime_pm
+* @dev: pointer to device
+*
+* This callback will be invoked by the runtime_pm framework when an AP resume
+* operation is invoked.
+*
+* Enables the network interface queue and returns success to the
+* runtime_pm framework.
+*/
+static int rmnet_ipa_ap_resume(struct device *dev)
+{
+ struct net_device *netdev = ipa_netdevs[0];
+
+ IPAWANDBG("Enter...\n");
+ netif_wake_queue(netdev);
+ IPAWANDBG("Exit\n");
+
+ return 0;
+}
+
+static void ipa_stop_polling_stats(void)
+{
+ cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+ ipa_rmnet_ctx.polling_interval = 0;
+}
+
+static const struct of_device_id rmnet_ipa_dt_match[] = {
+ {.compatible = "qcom,rmnet-ipa"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);
+
+static const struct dev_pm_ops rmnet_ipa_pm_ops = {
+ .suspend_noirq = rmnet_ipa_ap_suspend,
+ .resume_noirq = rmnet_ipa_ap_resume,
+};
+
+static struct platform_driver rmnet_ipa_driver = {
+ .driver = {
+ .name = "rmnet_ipa",
+ .owner = THIS_MODULE,
+ .pm = &rmnet_ipa_pm_ops,
+ .of_match_table = rmnet_ipa_dt_match,
+ },
+ .probe = ipa_wwan_probe,
+ .remove = ipa_wwan_remove,
+};
+
+static int ssr_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data)
+{
+ if (ipa_rmnet_ctx.ipa_rmnet_ssr) {
+ if (code == SUBSYS_BEFORE_SHUTDOWN) {
+ pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
+ atomic_set(&is_ssr, 1);
+ ipa_q6_pre_shutdown_cleanup();
+ if (ipa_netdevs[0])
+ netif_stop_queue(ipa_netdevs[0]);
+ ipa_qmi_stop_workqueues();
+ wan_ioctl_stop_qmi_messages();
+ ipa_stop_polling_stats();
+ if (atomic_read(&is_initialized))
+ platform_driver_unregister(&rmnet_ipa_driver);
+ pr_info("IPA BEFORE_SHUTDOWN handling is complete\n");
+ return NOTIFY_DONE;
+ }
+ if (code == SUBSYS_AFTER_SHUTDOWN) {
+ pr_info("IPA received MPSS AFTER_SHUTDOWN\n");
+ if (atomic_read(&is_ssr))
+ ipa_q6_post_shutdown_cleanup();
+ pr_info("IPA AFTER_SHUTDOWN handling is complete\n");
+ return NOTIFY_DONE;
+ }
+ if (code == SUBSYS_AFTER_POWERUP) {
+ pr_info("IPA received MPSS AFTER_POWERUP\n");
+ if (!atomic_read(&is_initialized)
+ && atomic_read(&is_ssr))
+ platform_driver_register(&rmnet_ipa_driver);
+ pr_info("IPA AFTER_POWERUP handling is complete\n");
+ return NOTIFY_DONE;
+ }
+ if (code == SUBSYS_BEFORE_POWERUP) {
+ pr_info("IPA received MPSS BEFORE_POWERUP\n");
+ if (atomic_read(&is_ssr))
+ /* clean up cached QMI msg/handlers */
+ ipa_qmi_service_exit();
+ ipa2_proxy_clk_vote();
+ pr_info("IPA BEFORE_POWERUP handling is complete\n");
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_DONE;
+}
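+/*
+ * Summary of the SSR sequence handled above, in notification order:
+ *	BEFORE_SHUTDOWN -> mark SSR, pre-shutdown pipe cleanup, stop the TX
+ *			   queue, quiesce QMI/ioctls/stats, unregister driver
+ *	AFTER_SHUTDOWN  -> post-shutdown pipe cleanup
+ *	BEFORE_POWERUP  -> drop cached QMI state, vote the proxy clock
+ *	AFTER_POWERUP   -> re-register the driver so probe runs again
+ */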
+
+/**
+ * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg
+ * @buff: pointer to buffer containing the message
+ * @len: message len
+ * @type: message type
+ *
+ * This function is invoked when ipa2_send_msg is complete (Provided as a
+ * free function pointer along with the message).
+ */
+static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
+{
+ if (!buff) {
+ IPAWANERR("Null buffer\n");
+ return;
+ }
+
+ if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
+ type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+ IPAWANERR("Wrong type given. buff %p type %d\n",
+ buff, type);
+ }
+ kfree(buff);
+}
+
+/**
+ * rmnet_ipa_get_stats_and_update() - Get pipe stats from the Modem
+ * @reset: when true, also clear the stats on the Modem after reading them
+ *
+ * This function queries the IPA Modem driver for the pipe stats
+ * via QMI, and updates the user space IPA entity.
+ */
+static void rmnet_ipa_get_stats_and_update(bool reset)
+{
+ struct ipa_get_data_stats_req_msg_v01 req;
+ struct ipa_get_data_stats_resp_msg_v01 *resp;
+ struct ipa_msg_meta msg_meta;
+ int rc;
+
+ resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ return;
+ }
+
+ memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+ memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+ req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+	if (reset) {
+ req.reset_stats_valid = true;
+ req.reset_stats = true;
+ IPAWANERR("Get the latest pipe-stats and reset it\n");
+ }
+
+	rc = ipa_qmi_get_data_stats(&req, resp);
+	if (rc) {
+		/* resp was never handed off to ipa2_send_msg, free it here */
+		kfree(resp);
+		return;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+	msg_meta.msg_len =
+		sizeof(struct ipa_get_data_stats_resp_msg_v01);
+	rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa2_send_msg failed: %d\n", rc);
+		kfree(resp);
+	}
+}
+
+/**
+ * tethering_stats_poll_queue() - Stats polling function
+ * @work - Work entry
+ *
+ * This function is scheduled periodically (per the interval) in
+ * order to poll the IPA Modem driver for the pipe stats.
+ */
+static void tethering_stats_poll_queue(struct work_struct *work)
+{
+ rmnet_ipa_get_stats_and_update(false);
+
+ /* Schedule again only if there's an active polling interval */
+ if (ipa_rmnet_ctx.polling_interval != 0)
+ schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
+ msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000));
+}
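+/*
+ * The function above is the standard self-rescheduling delayed-work idiom:
+ * the work re-arms itself while the interval is non-zero, and the IOCTL
+ * handler below stops it by zeroing the interval and cancelling the work.
+ * A minimal standalone sketch of the idiom (example_poll, example_work,
+ * do_periodic_job and still_enabled are hypothetical names):
+ *
+ *	static void example_poll(struct work_struct *work);
+ *	static DECLARE_DELAYED_WORK(example_work, example_poll);
+ *
+ *	static void example_poll(struct work_struct *work)
+ *	{
+ *		do_periodic_job();
+ *		if (still_enabled)
+ *			schedule_delayed_work(&example_work,
+ *					      msecs_to_jiffies(2000));
+ *	}
+ */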
+
+/**
+ * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
+ *
+ * This function retrieves the data usage (used quota) from the IPA Modem driver
+ * via QMI, and updates IPA user space entity.
+ */
+static void rmnet_ipa_get_network_stats_and_update(void)
+{
+ struct ipa_get_apn_data_stats_req_msg_v01 req;
+ struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
+ struct ipa_msg_meta msg_meta;
+ int rc;
+
+ resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for network stats message\n");
+ return;
+ }
+
+ memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
+ memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
+
+ req.mux_id_list_valid = true;
+ req.mux_id_list_len = 1;
+ req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;
+
+	rc = ipa_qmi_get_network_stats(&req, resp);
+	if (rc) {
+		/* resp was never handed off to ipa2_send_msg, free it here */
+		kfree(resp);
+		return;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
+	msg_meta.msg_len =
+		sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
+	rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa2_send_msg failed: %d\n", rc);
+		kfree(resp);
+	}
+}
+
+/**
+ * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_POLL_TETHERING_STATS.
+ * If the polling interval received is 0, polling stops (any poll already in
+ * progress is allowed to finish), after which the network stats are fetched
+ * and the IPA user space entity is updated.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
+{
+ ipa_rmnet_ctx.polling_interval = data->polling_interval_secs;
+
+ cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
+
+ if (ipa_rmnet_ctx.polling_interval == 0) {
+ ipa_qmi_stop_data_qouta();
+ rmnet_ipa_get_network_stats_and_update();
+ rmnet_ipa_get_stats_and_update(true);
+ return 0;
+ }
+
+ schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
+ return 0;
+}
+
+/**
+ * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the request of the quota to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+ u32 mux_id;
+ int index;
+ struct ipa_set_data_usage_quota_req_msg_v01 req;
+
+ index = find_vchannel_name_index(data->interface_name);
+ IPAWANERR("iface name %s, quota %lu\n",
+ data->interface_name,
+ (unsigned long int) data->quota_mbytes);
+
+ if (index == MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("%s is an invalid iface name\n",
+ data->interface_name);
+ return -EFAULT;
+ }
+
+ mux_id = mux_channel[index].mux_id;
+
+ ipa_rmnet_ctx.metered_mux_id = mux_id;
+
+ memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
+ req.apn_quota_list_valid = true;
+ req.apn_quota_list_len = 1;
+ req.apn_quota_list[0].mux_id = mux_id;
+ req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
+
+ return ipa_qmi_set_data_quota(&req);
+}
+
+/**
+ * rmnet_ipa_set_tether_client_pipe() - Tether client pipe IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
+ * It records the UL source and DL destination pipes that belong to the
+ * given tethered client, or clears those mappings when reset_client is set.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa_set_tether_client_pipe(
+ struct wan_ioctl_set_tether_client_pipe *data)
+{
+ int number, i;
+
+ IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
+ data->ipa_client,
+ data->ul_src_pipe_len,
+ data->dl_dst_pipe_len,
+ data->reset_client);
+ number = data->ul_src_pipe_len;
+ for (i = 0; i < number; i++) {
+ IPAWANDBG("UL index-%d pipe %d\n", i,
+ data->ul_src_pipe_list[i]);
+ if (data->reset_client)
+ ipa_set_client(data->ul_src_pipe_list[i],
+ 0, false);
+ else
+ ipa_set_client(data->ul_src_pipe_list[i],
+ data->ipa_client, true);
+ }
+ number = data->dl_dst_pipe_len;
+ for (i = 0; i < number; i++) {
+ IPAWANDBG("DL index-%d pipe %d\n", i,
+ data->dl_dst_pipe_list[i]);
+ if (data->reset_client)
+ ipa_set_client(data->dl_dst_pipe_list[i],
+ 0, false);
+ else
+ ipa_set_client(data->dl_dst_pipe_list[i],
+ data->ipa_client, false);
+ }
+ return 0;
+}
+
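+/**
+ * rmnet_ipa_query_tethering_stats() - Query (or reset) tethering pipe stats
+ * @data: stats structure to fill with per-client UL/DL counters; may be
+ *        NULL when only resetting
+ * @reset: when true, reset the pipe stats on the Modem instead of querying
+ *
+ * Return codes:
+ * 0: Success
+ * -ENOMEM: No memory available
+ * other: See ipa_qmi_get_data_stats
+ */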
+int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+ bool reset)
+{
+ struct ipa_get_data_stats_req_msg_v01 *req;
+ struct ipa_get_data_stats_resp_msg_v01 *resp;
+ int pipe_len, rc;
+
+ req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
+ GFP_KERNEL);
+ if (!req) {
+ IPAWANERR("failed to allocate memory for stats message\n");
+ return -ENOMEM;
+ }
+ resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("failed to allocate memory for stats message\n");
+ kfree(req);
+ return -ENOMEM;
+ }
+ memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+ memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+ req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+ if (reset) {
+ req->reset_stats_valid = true;
+ req->reset_stats = true;
+ IPAWANERR("reset the pipe stats\n");
+ } else {
+ /* print tethered-client enum */
+ IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
+ }
+
+ rc = ipa_qmi_get_data_stats(req, resp);
+ if (rc) {
+		IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
+ kfree(req);
+ kfree(resp);
+ return rc;
+ } else if (reset) {
+ kfree(req);
+ kfree(resp);
+ return 0;
+ }
+
+ if (resp->dl_dst_pipe_stats_list_valid) {
+ for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
+ pipe_len++) {
+ IPAWANDBG("Check entry(%d) dl_dst_pipe(%d)\n",
+ pipe_len, resp->dl_dst_pipe_stats_list
+ [pipe_len].pipe_index);
+ IPAWANDBG("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n",
+ (unsigned long int) resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv4_packets,
+ (unsigned long int) resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv6_packets,
+ (unsigned long int) resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv4_bytes,
+ (unsigned long int) resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv6_bytes);
+ if (ipa_get_client_uplink(resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ pipe_index) == false) {
+ if (data->ipa_client == ipa_get_client(resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ pipe_index)) {
+ /* update the DL stats */
+ data->ipv4_rx_packets += resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv4_packets;
+ data->ipv6_rx_packets += resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv6_packets;
+ data->ipv4_rx_bytes += resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv4_bytes;
+ data->ipv6_rx_bytes += resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv6_bytes;
+ }
+ }
+ }
+ }
+ IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+ (unsigned long int) data->ipv4_rx_packets,
+ (unsigned long int) data->ipv6_rx_packets,
+ (unsigned long int) data->ipv4_rx_bytes,
+ (unsigned long int) data->ipv6_rx_bytes);
+
+ if (resp->ul_src_pipe_stats_list_valid) {
+ for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
+ pipe_len++) {
+			IPAWANDBG("Check entry(%d) ul_src_pipe(%d)\n",
+ pipe_len,
+ resp->ul_src_pipe_stats_list[pipe_len].
+ pipe_index);
+ IPAWANDBG("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n",
+ (unsigned long int) resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv4_packets,
+ (unsigned long int) resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv6_packets,
+ (unsigned long int) resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv4_bytes,
+ (unsigned long int) resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv6_bytes);
+ if (ipa_get_client_uplink(resp->
+ ul_src_pipe_stats_list[pipe_len].
+ pipe_index) == true) {
+ if (data->ipa_client == ipa_get_client(resp->
+ ul_src_pipe_stats_list[pipe_len].
+ pipe_index)) {
+				/* update the UL stats */
+ data->ipv4_tx_packets += resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv4_packets;
+ data->ipv6_tx_packets += resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv6_packets;
+ data->ipv4_tx_bytes += resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv4_bytes;
+ data->ipv6_tx_bytes += resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv6_bytes;
+ }
+ }
+ }
+ }
+ IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+ (unsigned long int) data->ipv4_tx_packets,
+ (unsigned long int) data->ipv6_tx_packets,
+ (unsigned long int) data->ipv4_tx_bytes,
+ (unsigned long int) data->ipv6_tx_bytes);
+ kfree(req);
+ kfree(resp);
+ return 0;
+}
+
+/**
+ * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
+ * @mux_id - The MUX ID on which the quota has been reached
+ *
+ * This function broadcasts a Netlink event using the kobject of the
+ * rmnet_ipa interface in order to alert the user space that the quota
+ * on the specific interface which matches the mux_id has been reached.
+ *
+ */
+void ipa_broadcast_quota_reach_ind(u32 mux_id)
+{
+ char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
+ char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+ char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+ char *envp[IPA_UEVENT_NUM_EVNP] = {
+ alert_msg, iface_name_l, iface_name_m, NULL };
+ int res;
+ int index;
+
+ index = find_mux_channel_index(mux_id);
+
+ if (index == MAX_NUM_OF_MUX_CHANNEL) {
+		IPAWANERR("%u is an invalid mux ID\n", mux_id);
+ return;
+ }
+
+ res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
+ "ALERT_NAME=%s", "quotaReachedAlert");
+ if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
+ IPAWANERR("message too long (%d)", res);
+ return;
+ }
+ /* posting msg for L-release for CNE */
+ res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "UPSTREAM=%s", mux_channel[index].vchannel_name);
+ if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+ IPAWANERR("message too long (%d)", res);
+ return;
+ }
+ /* posting msg for M-release for CNE */
+ res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "INTERFACE=%s", mux_channel[index].vchannel_name);
+ if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+ IPAWANERR("message too long (%d)", res);
+ return;
+ }
+
+ IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
+ alert_msg, iface_name_l, iface_name_m);
+ kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
+}
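+/*
+ * The resulting uevent carries three environment variables, e.g. (with an
+ * illustrative vchannel name of "rmnet_data0"):
+ *	ALERT_NAME=quotaReachedAlert
+ *	UPSTREAM=rmnet_data0
+ *	INTERFACE=rmnet_data0
+ * User space (CNE) matches on whichever key its release understands.
+ */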
+
+/**
+ * ipa_q6_handshake_complete() - Perform operations once Q6 is up
+ * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
+ *
+ * This function is invoked once the handshake between the IPA AP driver
+ * and IPA Q6 driver is complete. At this point, it is possible to perform
+ * operations which can't be performed until IPA Q6 driver is up.
+ *
+ */
+void ipa_q6_handshake_complete(bool ssr_bootup)
+{
+ /* It is required to recover the network stats after SSR recovery */
+ if (ssr_bootup) {
+ /*
+ * In case the uC is required to be loaded by the Modem,
+ * the proxy vote will be removed only when uC loading is
+ * complete and indication is received by the AP. After SSR,
+ * uC is already loaded. Therefore, proxy vote can be removed
+ * once Modem init is complete.
+ */
+ ipa2_proxy_clk_unvote();
+
+ /*
+ * It is required to recover the network stats after
+ * SSR recovery
+ */
+ rmnet_ipa_get_network_stats_and_update();
+
+ /* Enable holb monitoring on Q6 pipes. */
+ ipa_q6_monitor_holb_mitigation(true);
+ }
+}
+
+static int __init ipa_wwan_init(void)
+{
+ atomic_set(&is_initialized, 0);
+ atomic_set(&is_ssr, 0);
+
+ mutex_init(&ipa_to_apps_pipe_handle_guard);
+ ipa_to_apps_hdl = -1;
+
+ ipa_qmi_init();
+
+ /* Register for Modem SSR */
+ subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM,
+ &ssr_notifier);
+ if (!IS_ERR(subsys_notify_handle))
+ return platform_driver_register(&rmnet_ipa_driver);
+ else
+ return (int)PTR_ERR(subsys_notify_handle);
+}
+
+static void __exit ipa_wwan_cleanup(void)
+{
+ int ret;
+
+ ipa_qmi_cleanup();
+ mutex_destroy(&ipa_to_apps_pipe_handle_guard);
+ ret = subsys_notif_unregister_notifier(subsys_notify_handle,
+ &ssr_notifier);
+ if (ret)
+ IPAWANERR(
+ "Error subsys_notif_unregister_notifier system %s, ret=%d\n",
+ SUBSYS_MODEM, ret);
+ platform_driver_unregister(&rmnet_ipa_driver);
+}
+
+static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+ if (!buff)
+ IPAWANERR("Null buffer.\n");
+ kfree(buff);
+}
+
+static void ipa_rmnet_rx_cb(void *priv)
+{
+ struct net_device *dev = priv;
+ struct wwan_private *wwan_ptr;
+
+ IPAWANDBG("\n");
+
+ if (dev != ipa_netdevs[0]) {
+ IPAWANERR("Not matching with netdev\n");
+ return;
+ }
+
+ wwan_ptr = netdev_priv(dev);
+ napi_schedule(&(wwan_ptr->napi));
+}
+
+static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
+{
+ int rcvd_pkts = 0;
+
+ rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT);
+ IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
+ return rcvd_pkts;
+}
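+/*
+ * Note: a canonical NAPI poll calls napi_complete() itself when it consumes
+ * less than the budget; here that completion is assumed to happen inside
+ * ipa_rx_poll(). The usual shape, for reference (example_poll and
+ * example_rx are hypothetical names):
+ *
+ *	static int example_poll(struct napi_struct *napi, int budget)
+ *	{
+ *		int done = example_rx(budget);
+ *
+ *		if (done < budget)
+ *			napi_complete(napi);	(re-enables device IRQs)
+ *		return done;
+ *	}
+ */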
+
+late_initcall(ipa_wwan_init);
+module_exit(ipa_wwan_cleanup);
+MODULE_DESCRIPTION("WWAN Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
new file mode 100644
index 0000000..811dba4
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
@@ -0,0 +1,391 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include "ipa_qmi_service.h"
+
+#define DRIVER_NAME "wwan_ioctl"
+
+#ifdef CONFIG_COMPAT
+#define WAN_IOC_ADD_FLT_RULE32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ADD_FLT_RULE, \
+ compat_uptr_t)
+#define WAN_IOC_ADD_FLT_RULE_INDEX32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ADD_FLT_INDEX, \
+ compat_uptr_t)
+#define WAN_IOC_POLL_TETHERING_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_POLL_TETHERING_STATS, \
+ compat_uptr_t)
+#define WAN_IOC_SET_DATA_QUOTA32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SET_DATA_QUOTA, \
+ compat_uptr_t)
+#define WAN_IOC_SET_TETHER_CLIENT_PIPE32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \
+ compat_uptr_t)
+#define WAN_IOC_QUERY_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_TETHER_STATS, \
+ compat_uptr_t)
+#define WAN_IOC_RESET_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_RESET_TETHER_STATS, \
+ compat_uptr_t)
+#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_DL_FILTER_STATS, \
+ compat_uptr_t)
+#endif
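+/*
+ * The *32 variants above re-encode each command with compat_uptr_t so that
+ * _IOC_SIZE(cmd) matches what a 32-bit process passes; the magic and the
+ * command number stay the same as in the native definitions (presumably
+ * declared elsewhere with the same WAN_IOC_MAGIC/WAN_IOCTL_* pairs), e.g.:
+ *
+ *	_IOC_NR(WAN_IOC_SET_DATA_QUOTA32) == WAN_IOCTL_SET_DATA_QUOTA
+ *	_IOC_NR(WAN_IOC_SET_DATA_QUOTA)   == WAN_IOCTL_SET_DATA_QUOTA
+ *
+ * which is why compat_wan_ioctl() below can translate by switching on the
+ * 32-bit command and substituting the native one.
+ */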
+
+static unsigned int dev_num = 1;
+static struct cdev wan_ioctl_cdev;
+static unsigned int process_ioctl = 1;
+static struct class *class;
+static dev_t device;
+
+static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ u32 pyld_sz;
+ u8 *param = NULL;
+
+ IPAWANDBG("device %s got ioctl events :>>>\n",
+ DRIVER_NAME);
+
+ if (!process_ioctl) {
+ IPAWANDBG("modem is in SSR, ignoring ioctl\n");
+ return -EAGAIN;
+ }
+
+ switch (cmd) {
+ case WAN_IOC_ADD_FLT_RULE:
+ IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (qmi_filter_request_send(
+ (struct ipa_install_fltr_rule_req_msg_v01 *)param)) {
+ IPAWANDBG("IPACM->Q6 add filter rule failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_ADD_FLT_RULE_INDEX:
+ IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (qmi_filter_notify_send(
+ (struct ipa_fltr_installed_notif_req_msg_v01 *)param)) {
+ IPAWANDBG("IPACM->Q6 rule index fail\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_VOTE_FOR_BW_MBPS:
+ IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(uint32_t);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (vote_for_bus_bw((uint32_t *)param)) {
+ IPAWANERR("Failed to vote for bus BW\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_POLL_TETHERING_STATS:
+ IPAWANDBG("device %s got WAN_IOCTL_POLL_TETHERING_STATS :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa_poll_tethering_stats(
+ (struct wan_ioctl_poll_tethering_stats *)param)) {
+ IPAWANERR("WAN_IOCTL_POLL_TETHERING_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_SET_DATA_QUOTA:
+ IPAWANDBG("device %s got WAN_IOCTL_SET_DATA_QUOTA :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct wan_ioctl_set_data_quota);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa_set_data_quota(
+ (struct wan_ioctl_set_data_quota *)param)) {
+ IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_SET_TETHER_CLIENT_PIPE:
+ IPAWANDBG("device %s got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct wan_ioctl_set_tether_client_pipe);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa_set_tether_client_pipe(
+ (struct wan_ioctl_set_tether_client_pipe *)param)) {
+ IPAWANERR("WAN_IOC_SET_TETHER_CLIENT_PIPE failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_QUERY_TETHER_STATS:
+ IPAWANDBG("device %s got WAN_IOC_QUERY_TETHER_STATS :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct wan_ioctl_query_tether_stats);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (rmnet_ipa_query_tethering_stats(
+ (struct wan_ioctl_query_tether_stats *)param, false)) {
+ IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_RESET_TETHER_STATS:
+ IPAWANDBG("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (rmnet_ipa_query_tethering_stats(NULL, true)) {
+			IPAWANERR("WAN_IOC_RESET_TETHER_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ default:
+ retval = -ENOTTY;
+ }
+ kfree(param);
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+long compat_wan_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case WAN_IOC_ADD_FLT_RULE32:
+ cmd = WAN_IOC_ADD_FLT_RULE;
+ break;
+ case WAN_IOC_ADD_FLT_RULE_INDEX32:
+ cmd = WAN_IOC_ADD_FLT_RULE_INDEX;
+ break;
+ case WAN_IOC_POLL_TETHERING_STATS32:
+ cmd = WAN_IOC_POLL_TETHERING_STATS;
+ break;
+ case WAN_IOC_SET_DATA_QUOTA32:
+ cmd = WAN_IOC_SET_DATA_QUOTA;
+ break;
+ case WAN_IOC_SET_TETHER_CLIENT_PIPE32:
+ cmd = WAN_IOC_SET_TETHER_CLIENT_PIPE;
+ break;
+ case WAN_IOC_QUERY_TETHER_STATS32:
+ cmd = WAN_IOC_QUERY_TETHER_STATS;
+ break;
+ case WAN_IOC_RESET_TETHER_STATS32:
+ cmd = WAN_IOC_RESET_TETHER_STATS;
+ break;
+ case WAN_IOC_QUERY_DL_FILTER_STATS32:
+ cmd = WAN_IOC_QUERY_DL_FILTER_STATS;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return wan_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static int wan_ioctl_open(struct inode *inode, struct file *filp)
+{
+ IPAWANDBG("\n IPA A7 wan_ioctl open OK :>>>> ");
+ return 0;
+}
+
+const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = wan_ioctl_open,
+ .read = NULL,
+ .unlocked_ioctl = wan_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_wan_ioctl,
+#endif
+};
+
+int wan_ioctl_init(void)
+{
+ unsigned int wan_ioctl_major = 0;
+ int ret;
+ struct device *dev;
+
+ device = MKDEV(wan_ioctl_major, 0);
+
+ ret = alloc_chrdev_region(&device, 0, dev_num, DRIVER_NAME);
+ if (ret) {
+ IPAWANERR(":device_alloc err.\n");
+ goto dev_alloc_err;
+ }
+ wan_ioctl_major = MAJOR(device);
+
+ class = class_create(THIS_MODULE, DRIVER_NAME);
+ if (IS_ERR(class)) {
+ IPAWANERR(":class_create err.\n");
+ goto class_err;
+ }
+
+ dev = device_create(class, NULL, device,
+ NULL, DRIVER_NAME);
+ if (IS_ERR(dev)) {
+ IPAWANERR(":device_create err.\n");
+ goto device_err;
+ }
+
+ cdev_init(&wan_ioctl_cdev, &fops);
+ ret = cdev_add(&wan_ioctl_cdev, device, dev_num);
+ if (ret) {
+ IPAWANERR(":cdev_add err.\n");
+ goto cdev_add_err;
+ }
+
+ process_ioctl = 1;
+
+ IPAWANDBG("IPA %s major(%d) initial ok :>>>>\n",
+ DRIVER_NAME, wan_ioctl_major);
+ return 0;
+
+cdev_add_err:
+ device_destroy(class, device);
+device_err:
+ class_destroy(class);
+class_err:
+ unregister_chrdev_region(device, dev_num);
+dev_alloc_err:
+ return -ENODEV;
+}
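+/*
+ * From user space, IPACM drives this interface with plain open()/ioctl()
+ * calls. An illustrative (not authoritative) fragment, assuming udev
+ * creates the node as /dev/wwan_ioctl from the device_create() above and
+ * that "rmnet_data0" is a configured vchannel name:
+ *
+ *	int fd = open("/dev/wwan_ioctl", O_RDWR);
+ *	struct wan_ioctl_set_data_quota quota = {
+ *		.interface_name = "rmnet_data0",
+ *		.quota_mbytes = 100,
+ *	};
+ *	if (fd >= 0 && ioctl(fd, WAN_IOC_SET_DATA_QUOTA, &quota) < 0)
+ *		perror("WAN_IOC_SET_DATA_QUOTA");
+ */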
+
+void wan_ioctl_stop_qmi_messages(void)
+{
+ process_ioctl = 0;
+}
+
+void wan_ioctl_enable_qmi_messages(void)
+{
+ process_ioctl = 1;
+}
+
+void wan_ioctl_deinit(void)
+{
+ cdev_del(&wan_ioctl_cdev);
+ device_destroy(class, device);
+ class_destroy(class);
+ unregister_chrdev_region(device, dev_num);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c
new file mode 100644
index 0000000..110ee03
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c
@@ -0,0 +1,240 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipa.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge"
+
+#define TETH_DBG(fmt, args...) \
+ pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args)
+#define TETH_DBG_FUNC_ENTRY() \
+ pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__)
+#define TETH_DBG_FUNC_EXIT() \
+ pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
+#define TETH_ERR(fmt, args...) \
+ pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+/**
+ * struct teth_bridge_ctx - Tethering bridge driver context information
+ * @class: kernel class pointer
+ * @dev_num: kernel device number
+ * @dev: kernel device struct pointer
+ * @cdev: kernel character device struct
+ */
+struct teth_bridge_ctx {
+ struct class *class;
+ dev_t dev_num;
+ struct device *dev;
+ struct cdev cdev;
+};
+static struct teth_bridge_ctx *teth_ctx;
+
+/**
+* teth_bridge_ipa_cb() - Callback to handle IPA data path events
+* @priv - private data
+* @evt - event type
+* @data - event specific data (usually skb)
+*
+* This callback is called by IPA driver for exception packets from USB.
+* All exception packets are handled by Q6 and should not reach this function.
+* Packets will arrive to AP exception pipe only in case where packets are
+* sent from USB before Q6 has setup the call.
+*/
+static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+
+ TETH_DBG_FUNC_ENTRY();
+ if (evt != IPA_RECEIVE) {
+ TETH_ERR("unexpected event %d\n", evt);
+ WARN_ON(1);
+ return;
+ }
+
+ TETH_ERR("Unexpected exception packet from USB, dropping packet\n");
+ dev_kfree_skb_any(skb);
+ TETH_DBG_FUNC_EXIT();
+}
+
+/**
+* ipa2_teth_bridge_init() - Initialize the Tethering bridge driver
+* @params - in/out params for USB initialization API (please look at struct
+* definition for more info)
+*
+* USB driver gets a pointer to a callback function (usb_notify_cb) and an
+* associated data. USB driver installs this callback function in the call to
+* ipa_connect().
+*
+* Builds IPA resource manager dependency graph.
+*
+* Return codes: 0: success,
+* -EINVAL - Bad parameter
+* Other negative value - Failure
+*/
+int ipa2_teth_bridge_init(struct teth_bridge_init_params *params)
+{
+ int res = 0;
+
+ TETH_DBG_FUNC_ENTRY();
+
+ if (!params) {
+ TETH_ERR("Bad parameter\n");
+ TETH_DBG_FUNC_EXIT();
+ return -EINVAL;
+ }
+
+ params->usb_notify_cb = teth_bridge_ipa_cb;
+ params->private_data = NULL;
+ params->skip_ep_cfg = true;
+
+ /* Build dependency graph */
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res < 0 && res != -EINPROGRESS) {
+ TETH_ERR("ipa_rm_add_dependency() failed.\n");
+ goto bail;
+ }
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ if (res < 0 && res != -EINPROGRESS) {
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ TETH_ERR("ipa_rm_add_dependency() failed.\n");
+ goto bail;
+ }
+
+	res = 0;
+
+bail:
+ TETH_DBG_FUNC_EXIT();
+ return res;
+}
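+/*
+ * The two calls above create the symmetric dependency pairs
+ *	USB_PROD -> Q6_CONS  and  Q6_PROD -> USB_CONS,
+ * which ipa2_teth_bridge_disconnect() below deletes again as the same pairs.
+ */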
+
+/**
+* ipa2_teth_bridge_disconnect() - Disconnect tethering bridge module
+*/
+int ipa2_teth_bridge_disconnect(enum ipa_client_type client)
+{
+ TETH_DBG_FUNC_ENTRY();
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ TETH_DBG_FUNC_EXIT();
+
+ return 0;
+}
+
+/**
+* ipa2_teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+* @connect_params: Connection info
+*
+* Return codes: 0: success
+* -EINVAL: invalid parameters
+* -EPERM: Operation not permitted as the bridge is already
+* connected
+*/
+int ipa2_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+ return 0;
+}
+
+static long teth_bridge_ioctl(struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+	IPAERR("No ioctls are supported!\n");
+ return -ENOIOCTLCMD;
+}
+
+static const struct file_operations teth_bridge_drv_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = teth_bridge_ioctl,
+};
+
+/**
+* teth_bridge_driver_init() - Initialize tethering bridge driver
+*
+*/
+int teth_bridge_driver_init(void)
+{
+ int res;
+
+ TETH_DBG("Tethering bridge driver init\n");
+ teth_ctx = kzalloc(sizeof(*teth_ctx), GFP_KERNEL);
+ if (!teth_ctx) {
+ TETH_ERR("kzalloc err.\n");
+ return -ENOMEM;
+ }
+
+ teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME);
+
+ res = alloc_chrdev_region(&teth_ctx->dev_num, 0, 1,
+ TETH_BRIDGE_DRV_NAME);
+ if (res) {
+ TETH_ERR("alloc_chrdev_region err.\n");
+ res = -ENODEV;
+ goto fail_alloc_chrdev_region;
+ }
+
+ teth_ctx->dev = device_create(teth_ctx->class, NULL, teth_ctx->dev_num,
+ teth_ctx, TETH_BRIDGE_DRV_NAME);
+ if (IS_ERR(teth_ctx->dev)) {
+ TETH_ERR(":device_create err.\n");
+ res = -ENODEV;
+ goto fail_device_create;
+ }
+
+ cdev_init(&teth_ctx->cdev, &teth_bridge_drv_fops);
+ teth_ctx->cdev.owner = THIS_MODULE;
+ teth_ctx->cdev.ops = &teth_bridge_drv_fops;
+
+ res = cdev_add(&teth_ctx->cdev, teth_ctx->dev_num, 1);
+ if (res) {
+ TETH_ERR(":cdev_add err=%d\n", -res);
+ res = -ENODEV;
+ goto fail_cdev_add;
+ }
+ TETH_DBG("Tethering bridge driver init OK\n");
+
+ return 0;
+fail_cdev_add:
+ device_destroy(teth_ctx->class, teth_ctx->dev_num);
+fail_device_create:
+ unregister_chrdev_region(teth_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+ kfree(teth_ctx);
+ teth_ctx = NULL;
+
+ return res;
+}
+EXPORT_SYMBOL(teth_bridge_driver_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Tethering bridge driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
new file mode 100644
index 0000000..a4faaea
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_IPA3) += ipahal/
+
+obj-$(CONFIG_IPA3) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+ ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
+
+obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
new file mode 100644
index 0000000..a2e1366
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -0,0 +1,5412 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/of_gpio.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/msm_gsi.h>
+#include <linux/qcom_iommu.h>
+#include <linux/time.h>
+#include <linux/hashtable.h>
+#include <linux/hash.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/smem.h>
+#define IPA_SUBSYSTEM_NAME "ipa_fws"
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define CREATE_TRACE_POINTS
+#include "ipa_trace.h"
+
+#define IPA_GPIO_IN_QUERY_CLK_IDX 0
+#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
+#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_MOBILE_AP_MODE(x) ((x) == IPA_MODE_MOBILE_AP_ETH || \
+			       (x) == IPA_MODE_MOBILE_AP_WAN || \
+			       (x) == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_A5_MUX_HEADER_LENGTH (8)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define CLEANUP_TAG_PROCESS_TIMEOUT 150
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+ (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
+
+#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
+#define IPA_SMEM_SIZE (8 * 1024)
+
+/* round addresses down/up to page boundaries per SMMU requirements */
+#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
+ do { \
+ (iova_p) = rounddown((iova), PAGE_SIZE); \
+ (pa_p) = rounddown((pa), PAGE_SIZE); \
+ (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
+ } while (0)
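+/*
+ * Worked example of the rounding above, assuming PAGE_SIZE == 4096 (0x1000):
+ * mapping iova = 0x12345, pa = 0x56789, size = 0x100 yields
+ *	iova_p = 0x12000, pa_p = 0x56000,
+ *	size_p = roundup(0x100 + 0x56789 - 0x56000, 0x1000)
+ *	       = roundup(0x889, 0x1000) = 0x1000,
+ * i.e. the whole page containing the buffer is mapped and the in-page
+ * offset of the buffer is preserved.
+ */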
+
+
+/* The relative location in /lib/firmware where the FWs will reside */
+#define IPA_FWS_PATH "ipa/ipa_fws.elf"
+
+#ifdef CONFIG_COMPAT
+#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_RT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_FLT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_FLT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_RT_TBL, \
+ compat_uptr_t)
+#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COPY_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+ compat_uptr_t)
+#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_MEM, \
+ compat_uptr_t)
+#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_INIT_NAT, \
+ compat_uptr_t)
+#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NAT_DMA, \
+ compat_uptr_t)
+#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_DEL_NAT, \
+ compat_uptr_t)
+#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_NAT_OFFSET, \
+ compat_uptr_t)
+#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PULL_MSG, \
+ compat_uptr_t)
+#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RM_ADD_DEPENDENCY, \
+ compat_uptr_t)
+#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RM_DEL_DEPENDENCY, \
+ compat_uptr_t)
+#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GENERATE_FLT_EQ, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+ compat_uptr_t)
+#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_WRITE_QMAPID, \
+ compat_uptr_t)
+#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_MDFY_FLT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+ compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+ compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+ compat_uptr_t)
+#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR_PROC_CTX, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR_PROC_CTX, \
+ compat_uptr_t)
+#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_MDFY_RT_RULE, \
+ compat_uptr_t)
+
+/**
+ * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of the table
+ * @size: input parameter, size of the table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa3_ioc_nat_alloc_mem32 {
+ char dev_name[IPA_RESOURCE_NAME_MAX];
+ compat_size_t size;
+ compat_off_t offset;
+};
+#endif
+
+static void ipa3_start_tag_process(struct work_struct *work);
+static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
+
+static void ipa3_sps_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_sps_release_resource_work,
+ ipa3_sps_release_resource);
+static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
+
+static void ipa_gsi_request_resource(struct work_struct *work);
+static DECLARE_WORK(ipa_gsi_request_resource_work,
+ ipa_gsi_request_resource);
+
+static void ipa_gsi_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_gsi_release_resource_work,
+ ipa_gsi_release_resource);
+
+static struct ipa3_plat_drv_res ipa3_res = {0, };
+struct msm_bus_scale_pdata *ipa3_bus_scale_table;
+
+static struct clk *ipa3_clk;
+
+struct ipa3_context *ipa3_ctx;
+static struct device *master_dev;
+struct platform_device *ipa3_pdev;
+static struct {
+ bool present;
+ bool arm_smmu;
+ bool disable_htw;
+ bool fast_map;
+ bool s1_bypass;
+ bool use_64_bit_dma_mask;
+ u32 ipa_base;
+ u32 ipa_size;
+} smmu_info;
+
+static char *active_clients_table_buf;
+
+int ipa3_active_clients_log_print_buffer(char *buf, int size)
+{
+ int i;
+ int nbytes;
+ int cnt = 0;
+ int start_idx;
+ int end_idx;
+
+ start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+ end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
+ for (i = start_idx; i != end_idx;
+ i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
+ nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
+ ipa3_ctx->ipa3_active_clients_logging
+ .log_buffer[i]);
+ cnt += nbytes;
+ }
+
+ return cnt;
+}
+
+int ipa3_active_clients_log_print_table(char *buf, int size)
+{
+ int i;
+ struct ipa3_active_client_htable_entry *iterator;
+ int cnt = 0;
+
+ cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
+ hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
+ iterator, list) {
+ switch (iterator->type) {
+ case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d ENDPOINT\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d SIMPLE\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d RESOURCE\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d SPECIAL\n",
+ iterator->id_string, iterator->count);
+ break;
+ default:
+			IPAERR("Trying to print illegal active_clients type\n");
+ break;
+ }
+ }
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "\nTotal active clients count: %d\n",
+ ipa3_ctx->ipa3_active_clients.cnt);
+
+ return cnt;
+}
+
+static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ ipa3_active_clients_lock();
+ ipa3_active_clients_log_print_table(active_clients_table_buf,
+ IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
+ IPAERR("%s", active_clients_table_buf);
+ ipa3_active_clients_unlock();
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_active_clients_panic_blk = {
+ .notifier_call = ipa3_active_clients_panic_notifier,
+};
+
+static int ipa3_active_clients_log_insert(const char *string)
+{
+ int head;
+ int tail;
+
+ if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
+ return -EPERM;
+
+ head = ipa3_ctx->ipa3_active_clients_logging.log_head;
+ tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
+
+ memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
+ IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+ strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
+ (size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+ head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+ if (tail == head)
+ tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+
+ ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
+ ipa3_ctx->ipa3_active_clients_logging.log_head = head;
+
+ return 0;
+}
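+/*
+ * In the ring above, head is the next write slot and tail marks the oldest
+ * retained line; when an insert makes head catch up with tail, tail is
+ * advanced so the ring overwrites the oldest entry instead of growing.
+ */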
+
+static int ipa3_active_clients_log_init(void)
+{
+ int i;
+
+ ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
+ sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
+ GFP_KERNEL);
+ active_clients_table_buf = kzalloc(sizeof(
+ char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
+	if (ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] == NULL) {
+ pr_err("Active Clients Logging memory allocation failed");
+ goto bail;
+ }
+ for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
+ ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
+ ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
+ (IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
+ }
+ ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+ ipa3_ctx->ipa3_active_clients_logging.log_tail =
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+ hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ipa3_active_clients_panic_blk);
+ ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;
+
+ return 0;
+
+bail:
+ return -ENOMEM;
+}
+
+void ipa3_active_clients_log_clear(void)
+{
+ ipa3_active_clients_lock();
+ ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+ ipa3_ctx->ipa3_active_clients_logging.log_tail =
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+ ipa3_active_clients_unlock();
+}
+
+static void ipa3_active_clients_log_destroy(void)
+{
+ ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
+ kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
+ ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+ ipa3_ctx->ipa3_active_clients_logging.log_tail =
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+}
+
+enum ipa_smmu_cb_type {
+ IPA_SMMU_CB_AP,
+ IPA_SMMU_CB_WLAN,
+ IPA_SMMU_CB_UC,
+ IPA_SMMU_CB_MAX
+
+};
+
+static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
+
+struct iommu_domain *ipa3_get_smmu_domain(void)
+{
+ if (smmu_cb[IPA_SMMU_CB_AP].valid)
+ return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
+
+ IPAERR("CB not valid\n");
+
+ return NULL;
+}
+
+struct iommu_domain *ipa3_get_uc_smmu_domain(void)
+{
+ if (smmu_cb[IPA_SMMU_CB_UC].valid)
+ return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
+
+ IPAERR("CB not valid\n");
+
+ return NULL;
+}
+
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
+{
+ if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
+ return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
+
+ IPAERR("CB not valid\n");
+
+ return NULL;
+}
+
+
+struct device *ipa3_get_dma_dev(void)
+{
+ return ipa3_ctx->pdev;
+}
+
+/**
+ * ipa3_get_smmu_ctx() - Return the AP smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
+{
+ return &smmu_cb[IPA_SMMU_CB_AP];
+}
+
+/**
+ * ipa3_get_wlan_smmu_ctx() - Return the wlan smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void)
+{
+ return &smmu_cb[IPA_SMMU_CB_WLAN];
+}
+
+/**
+ * ipa3_get_uc_smmu_ctx() - Return the uc smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void)
+{
+ return &smmu_cb[IPA_SMMU_CB_UC];
+}
+
+static int ipa3_open(struct inode *inode, struct file *filp)
+{
+ struct ipa3_context *ctx = NULL;
+
+ IPADBG_LOW("ENTER\n");
+ ctx = container_of(inode->i_cdev, struct ipa3_context, cdev);
+ filp->private_data = ctx;
+
+ return 0;
+}
+
+/**
+* ipa3_flow_control() - Enable/Disable flow control on a particular client.
+* Return codes:
+* None
+*/
+void ipa3_flow_control(enum ipa_client_type ipa_client,
+ bool enable, uint32_t qmap_id)
+{
+ struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+ int ep_idx;
+ struct ipa3_ep_context *ep;
+
+ /* Check if tethered flow control is needed or not.*/
+ if (!ipa3_ctx->tethered_flow_control) {
+ IPADBG("Apps flow control is not needed\n");
+ return;
+ }
+
+ /* Check if ep is valid. */
+ ep_idx = ipa3_get_ep_mapping(ipa_client);
+ if (ep_idx == -1) {
+ IPADBG("Invalid IPA client\n");
+ return;
+ }
+
+ ep = &ipa3_ctx->ep[ep_idx];
+ if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
+ IPADBG("EP not valid/Not applicable for client.\n");
+ return;
+ }
+
+ spin_lock(&ipa3_ctx->disconnect_lock);
+ /* Check if the QMAP_ID matches. */
+ if (ep->cfg.meta.qmap_id != qmap_id) {
+ IPADBG("Flow control ind not for same flow: %u %u\n",
+ ep->cfg.meta.qmap_id, qmap_id);
+ spin_unlock(&ipa3_ctx->disconnect_lock);
+ return;
+ }
+ if (!ep->disconnect_in_progress) {
+ if (enable) {
+ IPADBG("Enabling Flow\n");
+ ep_ctrl.ipa_ep_delay = false;
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_enable);
+ } else {
+ IPADBG("Disabling Flow\n");
+ ep_ctrl.ipa_ep_delay = true;
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_disable);
+ }
+ ep_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(ep_idx, &ep_ctrl);
+ } else {
+ IPADBG("EP disconnect is in progress\n");
+ }
+ spin_unlock(&ipa3_ctx->disconnect_lock);
+}
+
+static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+ if (!buff) {
+ IPAERR("Null buffer\n");
+ return;
+ }
+
+ if (type != WAN_UPSTREAM_ROUTE_ADD &&
+ type != WAN_UPSTREAM_ROUTE_DEL &&
+ type != WAN_EMBMS_CONNECT) {
+ IPAERR("Wrong type given. buff %p type %d\n", buff, type);
+ return;
+ }
+
+ kfree(buff);
+}
+
+static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
+{
+ int retval;
+ struct ipa_wan_msg *wan_msg;
+ struct ipa_msg_meta msg_meta;
+
+ wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
+ if (!wan_msg) {
+ IPAERR("no memory\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
+ sizeof(struct ipa_wan_msg))) {
+ kfree(wan_msg);
+ return -EFAULT;
+ }
+
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = msg_type;
+ msg_meta.msg_len = sizeof(struct ipa_wan_msg);
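+	/*
+	 * On success, ownership of wan_msg passes to the messaging
+	 * framework; it is freed later via ipa3_wan_msg_free_cb().
+	 */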
+ retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
+ if (retval) {
+ IPAERR("ipa3_send_msg failed: %d\n", retval);
+ kfree(wan_msg);
+ return retval;
+ }
+
+ return 0;
+}
+
+
+static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ u32 pyld_sz;
+ u8 header[128] = { 0 };
+ u8 *param = NULL;
+ struct ipa_ioc_nat_alloc_mem nat_mem;
+ struct ipa_ioc_v4_nat_init nat_init;
+ struct ipa_ioc_v4_nat_del nat_del;
+ struct ipa_ioc_rm_dependency rm_depend;
+ size_t sz;
+
+ IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+ if (!ipa3_is_ready()) {
+ IPAERR("IPA not ready, waiting for init completion\n");
+ wait_for_completion(&ipa3_ctx->init_completion_obj);
+ }
+
+ if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+ return -ENOTTY;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
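+	/*
+	 * Most variable-length commands below follow the same pattern:
+	 * copy in the fixed-size header first, derive the full payload
+	 * size from the element count it carries, allocate 'param' and
+	 * copy the complete payload before dispatching to the handler.
+	 */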
+ switch (cmd) {
+ case IPA_IOC_ALLOC_NAT_MEM:
+ if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ retval = -EFAULT;
+ break;
+ }
+ /* null terminate the string */
+ nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+ if (ipa3_allocate_nat_device(&nat_mem)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_V4_INIT_NAT:
+ if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+ sizeof(struct ipa_ioc_v4_nat_init))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_nat_init_cmd(&nat_init)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_NAT_DMA:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_nat_dma_cmd))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz =
+ sizeof(struct ipa_ioc_nat_dma_cmd) +
+ ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
+ sizeof(struct ipa_ioc_nat_dma_one);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_V4_DEL_NAT:
+ if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+ sizeof(struct ipa_ioc_v4_nat_del))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_nat_del_cmd(&nat_del)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_hdr) +
+ ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
+ sizeof(struct ipa_hdr_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_hdr) +
+ ((struct ipa_ioc_del_hdr *)header)->num_hdls *
+ sizeof(struct ipa_hdr_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_rt_rule) +
+ ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
+ sizeof(struct ipa_rt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_ADD_RT_RULE_AFTER:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_rt_rule_after))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_rt_rule_after) +
+ ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules *
+ sizeof(struct ipa_rt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_add_rt_rule_after(
+ (struct ipa_ioc_add_rt_rule_after *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_MDFY_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_mdfy_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_mdfy_rt_rule) +
+ ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules *
+ sizeof(struct ipa_rt_rule_mdfy);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_rt_rule) +
+ ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
+ sizeof(struct ipa_rt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_flt_rule) +
+ ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
+ sizeof(struct ipa_flt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_FLT_RULE_AFTER:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_flt_rule_after))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_flt_rule_after) +
+ ((struct ipa_ioc_add_flt_rule_after *)header)->num_rules *
+ sizeof(struct ipa_flt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_add_flt_rule_after(
+ (struct ipa_ioc_add_flt_rule_after *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_flt_rule) +
+ ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
+ sizeof(struct ipa_flt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_MDFY_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_mdfy_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_mdfy_flt_rule) +
+ ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules *
+ sizeof(struct ipa_flt_rule_mdfy);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_COMMIT_HDR:
+ retval = ipa3_commit_hdr();
+ break;
+ case IPA_IOC_RESET_HDR:
+ retval = ipa3_reset_hdr();
+ break;
+ case IPA_IOC_COMMIT_RT:
+ retval = ipa3_commit_rt(arg);
+ break;
+ case IPA_IOC_RESET_RT:
+ retval = ipa3_reset_rt(arg);
+ break;
+ case IPA_IOC_COMMIT_FLT:
+ retval = ipa3_commit_flt(arg);
+ break;
+ case IPA_IOC_RESET_FLT:
+ retval = ipa3_reset_flt(arg);
+ break;
+ case IPA_IOC_GET_RT_TBL:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_rt_tbl))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_rt_tbl))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PUT_RT_TBL:
+ retval = ipa3_put_rt_tbl(arg);
+ break;
+ case IPA_IOC_GET_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PUT_HDR:
+ retval = ipa3_put_hdr(arg);
+ break;
+ case IPA_IOC_SET_FLT:
+ retval = ipa3_cfg_filter(arg);
+ break;
+ case IPA_IOC_COPY_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_copy_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_copy_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_QUERY_INTF:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_query_intf))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_query_intf))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_QUERY_INTF_TX_PROPS:
+ sz = sizeof(struct ipa_ioc_query_intf_tx_props);
+ if (copy_from_user(header, (u8 *)arg, sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
+ > IPA_NUM_PROPS_MAX) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
+ header)->num_tx_props *
+ sizeof(struct ipa_ioc_tx_intf_prop);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_query_intf_tx_props(
+ (struct ipa_ioc_query_intf_tx_props *)param)) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_QUERY_INTF_RX_PROPS:
+ sz = sizeof(struct ipa_ioc_query_intf_rx_props);
+ if (copy_from_user(header, (u8 *)arg, sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
+ > IPA_NUM_PROPS_MAX) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
+ header)->num_rx_props *
+ sizeof(struct ipa_ioc_rx_intf_prop);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_query_intf_rx_props(
+ (struct ipa_ioc_query_intf_rx_props *)param)) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_QUERY_INTF_EXT_PROPS:
+ sz = sizeof(struct ipa_ioc_query_intf_ext_props);
+ if (copy_from_user(header, (u8 *)arg, sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (((struct ipa_ioc_query_intf_ext_props *)
+ header)->num_ext_props > IPA_NUM_PROPS_MAX) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz = sz + ((struct ipa_ioc_query_intf_ext_props *)
+ header)->num_ext_props *
+ sizeof(struct ipa_ioc_ext_intf_prop);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_query_intf_ext_props(
+ (struct ipa_ioc_query_intf_ext_props *)param)) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PULL_MSG:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_msg_meta))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz = sizeof(struct ipa_msg_meta) +
+ ((struct ipa_msg_meta *)header)->msg_len;
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_pull_msg((struct ipa_msg_meta *)param,
+ (char *)param + sizeof(struct ipa_msg_meta),
+ ((struct ipa_msg_meta *)param)->msg_len) !=
+ ((struct ipa_msg_meta *)param)->msg_len) {
+ retval = -1;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_RM_ADD_DEPENDENCY:
+ if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+ sizeof(struct ipa_ioc_rm_dependency))) {
+ retval = -EFAULT;
+ break;
+ }
+ retval = ipa_rm_add_dependency_from_ioctl(
+ rm_depend.resource_name, rm_depend.depends_on_name);
+ break;
+ case IPA_IOC_RM_DEL_DEPENDENCY:
+ if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+ sizeof(struct ipa_ioc_rm_dependency))) {
+ retval = -EFAULT;
+ break;
+ }
+ retval = ipa_rm_delete_dependency_from_ioctl(
+ rm_depend.resource_name, rm_depend.depends_on_name);
+ break;
+ case IPA_IOC_GENERATE_FLT_EQ:
+ {
+ struct ipa_ioc_generate_flt_eq flt_eq;
+
+ if (copy_from_user(&flt_eq, (u8 *)arg,
+ sizeof(struct ipa_ioc_generate_flt_eq))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipahal_flt_generate_equation(flt_eq.ip,
+ &flt_eq.attrib, &flt_eq.eq_attrib)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, &flt_eq,
+ sizeof(struct ipa_ioc_generate_flt_eq))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ }
+ case IPA_IOC_QUERY_EP_MAPPING:
+ {
+ retval = ipa3_get_ep_mapping(arg);
+ break;
+ }
+ case IPA_IOC_QUERY_RT_TBL_INDEX:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_query_rt_index(
+ (struct ipa_ioc_get_rt_tbl_indx *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_WRITE_QMAPID:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_write_qmapid))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_write_qmapid))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
+ retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD);
+ if (retval) {
+ IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+ break;
+ }
+ break;
+ case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
+ retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL);
+ if (retval) {
+ IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+ break;
+ }
+ break;
+ case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
+ retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT);
+ if (retval) {
+ IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+ break;
+ }
+ break;
+ case IPA_IOC_ADD_HDR_PROC_CTX:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
+ ((struct ipa_ioc_add_hdr_proc_ctx *)header)->num_proc_ctxs *
+ sizeof(struct ipa_hdr_proc_ctx_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_add_hdr_proc_ctx(
+ (struct ipa_ioc_add_hdr_proc_ctx *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_DEL_HDR_PROC_CTX:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
+ ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls *
+ sizeof(struct ipa_hdr_proc_ctx_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_del_hdr_proc_ctx(
+ (struct ipa_ioc_del_hdr_proc_ctx *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_GET_HW_VERSION:
+ pyld_sz = sizeof(enum ipa_hw_type);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ default: /* redundant, as cmd was checked against MAXNR */
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return -ENOTTY;
+ }
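+	/* free any per-command payload; kfree(NULL) is a safe no-op */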
+ kfree(param);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return retval;
+}
+
+/**
+ * ipa3_setup_dflt_rt_tables() - Setup default routing tables
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
+int ipa3_setup_dflt_rt_tables(void)
+{
+ struct ipa_ioc_add_rt_rule *rt_rule;
+ struct ipa_rt_rule_add *rt_rule_entry;
+
+ rt_rule =
+ kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+ sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+ if (!rt_rule) {
+ IPAERR("fail to alloc mem\n");
+ return -ENOMEM;
+ }
+ /* setup a default v4 route to point to Apps */
+ rt_rule->num_rules = 1;
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v4;
+ strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+ IPA_RESOURCE_NAME_MAX);
+
+ rt_rule_entry = &rt_rule->rules[0];
+ rt_rule_entry->at_rear = 1;
+ rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
+ rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
+ rt_rule_entry->rule.retain_hdr = 1;
+
+ if (ipa3_add_rt_rule(rt_rule)) {
+ IPAERR("fail to add dflt v4 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+ /* setup a default v6 route to point to A5 */
+ rt_rule->ip = IPA_IP_v6;
+ if (ipa3_add_rt_rule(rt_rule)) {
+ IPAERR("fail to add dflt v6 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/*
+	 * Because these tables are the very first to be added, they will
+	 * both have the same index (0), which is essential for programming
+	 * the "route" end-point config.
+	 */
+
+ kfree(rt_rule);
+
+ return 0;
+}
+
+static int ipa3_setup_exception_path(void)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_hdr_add *hdr_entry;
+ struct ipahal_reg_route route = { 0 };
+ int ret;
+
+ /* install the basic exception header */
+ hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+ sizeof(struct ipa_hdr_add), GFP_KERNEL);
+ if (!hdr) {
+ IPAERR("fail to alloc exception hdr\n");
+ return -ENOMEM;
+ }
+ hdr->num_hdrs = 1;
+ hdr->commit = 1;
+ hdr_entry = &hdr->hdr[0];
+
+ strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+ hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+
+ if (ipa3_add_hdr(hdr)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ if (hdr_entry->status) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+ /* set the route register to pass exception packets to Apps */
+ route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ route.route_frag_def_pipe = ipa3_get_ep_mapping(
+ IPA_CLIENT_APPS_LAN_CONS);
+ route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
+ route.route_def_retain_hdr = 1;
+
+ if (ipa3_cfg_route(&route)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ kfree(hdr);
+ return ret;
+}
+
+static int ipa3_init_smem_region(int memory_region_size,
+ int memory_region_offset)
+{
+ struct ipahal_imm_cmd_dma_shared_mem cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipa3_desc desc;
+ struct ipa_mem_buffer mem;
+ int rc;
+
+ if (memory_region_size == 0)
+ return 0;
+
+ memset(&desc, 0, sizeof(desc));
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&mem, 0, sizeof(mem));
+
+ mem.size = memory_region_size;
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
+ &mem.phys_base, GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+
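+	/*
+	 * Zero the region by DMA'ing a zeroed system buffer into IPA
+	 * local memory using a DMA_SHARED_MEM immediate command.
+	 */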
+ memset(mem.base, 0, mem.size);
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+ memory_region_offset;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ return -ENOMEM;
+ }
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ desc.type = IPA_IMM_CMD_DESC;
+
+ rc = ipa3_send_cmd(1, &desc);
+ if (rc) {
+ IPAERR("failed to send immediate command (error %d)\n", rc);
+ rc = -EFAULT;
+ }
+
+ ipahal_destroy_imm_cmd(cmd_pyld);
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+ mem.phys_base);
+
+ return rc;
+}
+
+/**
+ * ipa3_init_q6_smem() - Initialize Q6 general memory and
+ *	header memory regions in IPA.
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate dma memory
+ * -EFAULT: failed to send IPA command to initialize the memory
+ */
+int ipa3_init_q6_smem(void)
+{
+ int rc;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
+ IPA_MEM_PART(modem_ofst));
+ if (rc) {
+ IPAERR("failed to initialize Modem RAM memory\n");
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return rc;
+ }
+
+ rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
+ IPA_MEM_PART(modem_hdr_ofst));
+ if (rc) {
+ IPAERR("failed to initialize Modem HDRs RAM memory\n");
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return rc;
+ }
+
+ rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+ if (rc) {
+ IPAERR("failed to initialize Modem proc ctx RAM memory\n");
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return rc;
+ }
+
+ rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
+ IPA_MEM_PART(modem_comp_decomp_ofst));
+ if (rc) {
+ IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return rc;
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return rc;
+}
+
+static void ipa3_destroy_imm(void *user1, int user2)
+{
+ ipahal_destroy_imm_cmd(user1);
+}
+
+static void ipa3_q6_pipe_delay(bool delay)
+{
+ int client_idx;
+ int ep_idx;
+ struct ipa_ep_cfg_ctrl ep_ctrl;
+
+ memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_ctrl.ipa_ep_delay = delay;
+
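+	/*
+	 * As with ipa3_q6_avoid_holb() below, the register is written
+	 * directly via ipahal since Q6 pipes are not valid EPs from the
+	 * APPS perspective.
+	 */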
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+ ep_idx = ipa3_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+ ep_idx, &ep_ctrl);
+ }
+ }
+}
+
+static void ipa3_q6_avoid_holb(void)
+{
+ int ep_idx;
+ int client_idx;
+ struct ipa_ep_cfg_ctrl ep_suspend;
+ struct ipa_ep_cfg_holb ep_holb;
+
+ memset(&ep_suspend, 0, sizeof(ep_suspend));
+ memset(&ep_holb, 0, sizeof(ep_holb));
+
+ ep_suspend.ipa_ep_suspend = true;
+ ep_holb.tmr_val = 0;
+ ep_holb.en = 1;
+
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
+ ep_idx = ipa3_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
+ /*
+ * ipa3_cfg_ep_holb is not used here because we are
+ * setting HOLB on Q6 pipes, and from APPS perspective
+ * they are not valid, therefore, the above function
+ * will fail.
+ */
+ ipahal_write_reg_n_fields(
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+ ep_idx, &ep_holb);
+ ipahal_write_reg_n_fields(
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+ ep_idx, &ep_holb);
+
+ ipahal_write_reg_n_fields(
+ IPA_ENDP_INIT_CTRL_n,
+ ep_idx, &ep_suspend);
+ }
+ }
+}
+
+static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
+ enum ipa_rule_type rlt)
+{
+ struct ipa3_desc *desc;
+ struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
+ struct ipahal_imm_cmd_pyld **cmd_pyld;
+ int retval = 0;
+ int pipe_idx;
+ int flt_idx = 0;
+ int num_cmds = 0;
+ int index;
+ u32 lcl_addr_mem_part;
+ u32 lcl_hdr_sz;
+ struct ipa_mem_buffer mem;
+
+ IPADBG("Entry\n");
+
+ if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+ IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
+ return -EINVAL;
+ }
+
+	/* There is one filtering table for each pipe that supports filtering */
+ desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
+ GFP_KERNEL);
+ if (!desc) {
+ IPAERR("failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
+ sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
+ if (!cmd_pyld) {
+ IPAERR("failed to allocate memory\n");
+ retval = -ENOMEM;
+ goto free_desc;
+ }
+
+ if (ip == IPA_IP_v4) {
+ if (rlt == IPA_RULE_HASHABLE) {
+ lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
+ } else {
+ lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+ }
+ } else {
+ if (rlt == IPA_RULE_HASHABLE) {
+ lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
+ } else {
+ lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+ }
+ }
+
+ retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
+ 0, &mem);
+ if (retval) {
+ IPAERR("failed to generate flt single tbl empty img\n");
+ goto free_cmd_pyld;
+ }
+
+ for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
+ if (!ipa_is_ep_support_flt(pipe_idx))
+ continue;
+
+		/*
+		 * Iterate over all the filtering pipes that are either
+		 * invalid but connected, or connected but not configured
+		 * by the AP.
+		 */
+ if (!ipa3_ctx->ep[pipe_idx].valid ||
+ ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
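+			/*
+			 * Skip the table header and point at this pipe's
+			 * entry: header entries are one hdr-width each, one
+			 * per filtering pipe.
+			 */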
+ cmd.local_addr =
+ ipa3_ctx->smem_restricted_bytes +
+ lcl_addr_mem_part +
+ ipahal_get_hw_tbl_hdr_width() +
+ flt_idx * ipahal_get_hw_tbl_hdr_width();
+ cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld[num_cmds]) {
+ IPAERR("fail construct dma_shared_mem cmd\n");
+ retval = -ENOMEM;
+ goto free_empty_img;
+ }
+ desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+ desc[num_cmds].len = cmd_pyld[num_cmds]->len;
+ desc[num_cmds].type = IPA_IMM_CMD_DESC;
+ num_cmds++;
+ }
+
+ flt_idx++;
+ }
+
+ IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
+ retval = ipa3_send_cmd(num_cmds, desc);
+ if (retval) {
+ IPAERR("failed to send immediate command (err %d)\n", retval);
+ retval = -EFAULT;
+ }
+
+free_empty_img:
+ ipahal_free_dma_mem(&mem);
+free_cmd_pyld:
+ for (index = 0; index < num_cmds; index++)
+ ipahal_destroy_imm_cmd(cmd_pyld[index]);
+ kfree(cmd_pyld);
+free_desc:
+ kfree(desc);
+ return retval;
+}
+
+static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
+ enum ipa_rule_type rlt)
+{
+ struct ipa3_desc *desc;
+ struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+ int retval = 0;
+ u32 modem_rt_index_lo;
+ u32 modem_rt_index_hi;
+ u32 lcl_addr_mem_part;
+ u32 lcl_hdr_sz;
+ struct ipa_mem_buffer mem;
+
+ IPADBG("Entry\n");
+
+ if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+ IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
+ return -EINVAL;
+ }
+
+ if (ip == IPA_IP_v4) {
+ modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
+ modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
+ if (rlt == IPA_RULE_HASHABLE) {
+ lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
+ } else {
+ lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+ }
+ } else {
+ modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
+ modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
+ if (rlt == IPA_RULE_HASHABLE) {
+ lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
+ } else {
+ lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+ }
+ }
+
+ retval = ipahal_rt_generate_empty_img(
+ modem_rt_index_hi - modem_rt_index_lo + 1,
+ lcl_hdr_sz, lcl_hdr_sz, &mem);
+ if (retval) {
+ IPAERR("fail generate empty rt img\n");
+ return -ENOMEM;
+ }
+
+	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		retval = -ENOMEM;
+		goto free_empty_img;
+	}
+
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+ lcl_addr_mem_part +
+ modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ retval = -ENOMEM;
+ goto free_desc;
+ }
+ desc->opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc->pyld = cmd_pyld->data;
+ desc->len = cmd_pyld->len;
+ desc->type = IPA_IMM_CMD_DESC;
+
+ IPADBG("Sending 1 descriptor for rt tbl clearing\n");
+ retval = ipa3_send_cmd(1, desc);
+ if (retval) {
+ IPAERR("failed to send immediate command (err %d)\n", retval);
+ retval = -EFAULT;
+ }
+
+ ipahal_destroy_imm_cmd(cmd_pyld);
+free_desc:
+ kfree(desc);
+free_empty_img:
+ ipahal_free_dma_mem(&mem);
+ return retval;
+}
+
+static int ipa3_q6_clean_q6_tables(void)
+{
+ struct ipa3_desc *desc;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+ struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+ int retval;
+ struct ipahal_reg_fltrt_hash_flush flush;
+ struct ipahal_reg_valmask valmask;
+
+ IPADBG("Entry\n");
+
+ if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
+ IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
+ return -EFAULT;
+ }
+ if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
+ IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
+ return -EFAULT;
+ }
+ if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
+ IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
+ return -EFAULT;
+ }
+ if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
+ IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
+ return -EFAULT;
+ }
+
+ if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
+ IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
+ return -EFAULT;
+ }
+ if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
+ IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
+ return -EFAULT;
+ }
+ if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
+ IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
+ return -EFAULT;
+ }
+ if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
+ IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
+ return -EFAULT;
+ }
+
+ /* Flush rules cache */
+ desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
+ if (!desc) {
+ IPAERR("failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ flush.v4_flt = true;
+ flush.v4_rt = true;
+ flush.v6_flt = true;
+ flush.v6_rt = true;
+ ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+ reg_write_cmd.skip_pipeline_clear = false;
+ reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+ reg_write_cmd.value = valmask.val;
+ reg_write_cmd.value_mask = valmask.mask;
+ cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&reg_write_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct register_write imm cmd\n");
+ retval = -EFAULT;
+ goto bail_desc;
+ }
+ desc->opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc->pyld = cmd_pyld->data;
+ desc->len = cmd_pyld->len;
+ desc->type = IPA_IMM_CMD_DESC;
+
+ IPADBG("Sending 1 descriptor for tbls flush\n");
+ retval = ipa3_send_cmd(1, desc);
+ if (retval) {
+ IPAERR("failed to send immediate command (err %d)\n", retval);
+ retval = -EFAULT;
+ }
+
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+bail_desc:
+ kfree(desc);
+ IPADBG("Done - retval = %d\n", retval);
+ return retval;
+}
+
+static int ipa3_q6_set_ex_path_to_apps(void)
+{
+ int ep_idx;
+ int client_idx;
+ struct ipa3_desc *desc;
+ int num_descs = 0;
+ int index;
+ struct ipahal_imm_cmd_register_write reg_write;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ int retval;
+ struct ipahal_reg_valmask valmask;
+
+ desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
+ GFP_KERNEL);
+ if (!desc) {
+ IPAERR("failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Set the exception path to AP */
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ ep_idx = ipa3_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
+ if (ipa3_ctx->ep[ep_idx].valid &&
+ ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
+ BUG_ON(num_descs >= ipa3_ctx->ipa_num_pipes);
+
+ reg_write.skip_pipeline_clear = false;
+ reg_write.pipeline_clear_options =
+ IPAHAL_HPS_CLEAR;
+ reg_write.offset =
+ ipahal_get_reg_ofst(IPA_ENDP_STATUS_n);
+ ipahal_get_status_ep_valmask(
+ ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
+ &valmask);
+ reg_write.value = valmask.val;
+ reg_write.value_mask = valmask.mask;
+ cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct register_write cmd\n");
+ BUG();
+ }
+
+ desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_REGISTER_WRITE);
+ desc[num_descs].type = IPA_IMM_CMD_DESC;
+ desc[num_descs].callback = ipa3_destroy_imm;
+ desc[num_descs].user1 = cmd_pyld;
+ desc[num_descs].pyld = cmd_pyld->data;
+ desc[num_descs].len = cmd_pyld->len;
+ num_descs++;
+ }
+ }
+
+ /* Will wait 150msecs for IPA tag process completion */
+ retval = ipa3_tag_process(desc, num_descs,
+ msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
+ if (retval) {
+ IPAERR("TAG process failed! (error %d)\n", retval);
+ /* For timeout error ipa3_destroy_imm cb will destroy user1 */
+ if (retval != -ETIME) {
+ for (index = 0; index < num_descs; index++)
+ if (desc[index].callback)
+ desc[index].callback(desc[index].user1,
+ desc[index].user2);
+ retval = -EINVAL;
+ }
+ }
+
+ kfree(desc);
+
+ return retval;
+}
+
+/**
+ * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related
+ *	configuration in IPA HW. This is performed in case of SSR.
+ *
+ * This is a mandatory procedure; in case one of the steps fails, the
+ * AP needs to restart.
+ */
+void ipa3_q6_pre_shutdown_cleanup(void)
+{
+ IPADBG_LOW("ENTER\n");
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ ipa3_q6_pipe_delay(true);
+ ipa3_q6_avoid_holb();
+ if (ipa3_q6_clean_q6_tables()) {
+ IPAERR("Failed to clean Q6 tables\n");
+ BUG();
+ }
+ if (ipa3_q6_set_ex_path_to_apps()) {
+ IPAERR("Failed to redirect exceptions to APPS\n");
+ BUG();
+ }
+ /* Remove delay from Q6 PRODs to avoid pending descriptors
+ * on pipe reset procedure
+ */
+ ipa3_q6_pipe_delay(false);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPADBG_LOW("Exit with success\n");
+}
+
+/*
+ * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup, verify that
+ * the GSI channels related to Q6 producer clients are empty.
+ *
+ * Q6 GSI channel emptiness is needed to guarantee that no descriptors with
+ * invalid info are injected into IPA RX from IPA_IF while the modem is
+ * restarting.
+ */
+void ipa3_q6_post_shutdown_cleanup(void)
+{
+ int client_idx;
+
+ IPADBG_LOW("ENTER\n");
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (!ipa3_ctx->uc_ctx.uc_loaded) {
+		IPAERR("uC is not loaded. Skipping\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+ if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+ if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
+ IPAERR("fail to validate Q6 ch emptiness %d\n",
+ client_idx);
+ BUG();
+ return;
+ }
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPADBG_LOW("Exit with success\n");
+}
+
+static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
+{
+ /* Set 4 bytes of CANARY before the offset */
+ sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
+}
+
+/**
+ * _ipa_init_sram_v3_0() - Initialize IPA local SRAM.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_sram_v3_0(void)
+{
+ u32 *ipa_sram_mmio;
+ unsigned long phys_addr;
+
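+	/*
+	 * SRAM is exposed through the SRAM_DIRECT_ACCESS_n register window;
+	 * the window index is in 32-bit words, hence the division by 4.
+	 */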
+ phys_addr = ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+ ipa3_ctx->smem_restricted_bytes / 4);
+
+ ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
+ if (!ipa_sram_mmio) {
+ IPAERR("fail to ioremap IPA SRAM\n");
+ return -ENOMEM;
+ }
+
+ /* Consult with ipa_i.h on the location of the CANARY values */
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio,
+ IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio,
+ IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio,
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio,
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst));
+
+ iounmap(ipa_sram_mmio);
+
+ return 0;
+}
+
+/**
+ * _ipa_init_hdr_v3_0() - Initialize IPA header block.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_hdr_v3_0(void)
+{
+ struct ipa3_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipahal_imm_cmd_hdr_init_local cmd = {0};
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
+
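+	/*
+	 * Header block init is done in two steps: HDR_INIT_LOCAL loads an
+	 * empty header table into local memory, then DMA_SHARED_MEM clears
+	 * the header processing-context region, whose base is programmed
+	 * into IPA_LOCAL_PKT_PROC_CNTXT_BASE.
+	 */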
+ mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+ memset(mem.base, 0, mem.size);
+
+ cmd.hdr_table_addr = mem.phys_base;
+ cmd.size_hdr_table = mem.size;
+ cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(modem_hdr_ofst);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail to construct hdr_init_local imm cmd\n");
+ dma_free_coherent(ipa3_ctx->pdev,
+ mem.size, mem.base,
+ mem.phys_base);
+ return -EFAULT;
+ }
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_HDR_INIT_LOCAL);
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa3_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ ipahal_destroy_imm_cmd(cmd_pyld);
+ dma_free_coherent(ipa3_ctx->pdev,
+ mem.size, mem.base,
+ mem.phys_base);
+ return -EFAULT;
+ }
+
+ ipahal_destroy_imm_cmd(cmd_pyld);
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+ mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
+ IPA_MEM_PART(apps_hdr_proc_ctx_size);
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+ memset(mem.base, 0, mem.size);
+ memset(&desc, 0, sizeof(desc));
+
+ dma_cmd.is_read = false;
+ dma_cmd.skip_pipeline_clear = false;
+ dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ dma_cmd.system_addr = mem.phys_base;
+ dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
+ dma_cmd.size = mem.size;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail to construct dma_shared_mem imm\n");
+ dma_free_coherent(ipa3_ctx->pdev,
+ mem.size, mem.base,
+ mem.phys_base);
+ return -EFAULT;
+ }
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa3_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ ipahal_destroy_imm_cmd(cmd_pyld);
+ dma_free_coherent(ipa3_ctx->pdev,
+ mem.size,
+ mem.base,
+ mem.phys_base);
+ return -EFAULT;
+ }
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+ ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
+
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+ return 0;
+}
+
+/**
+ * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_rt4_v3(void)
+{
+ struct ipa3_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ int i;
+ int rc = 0;
+
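+	/*
+	 * Mark the modem-owned routing table indices as allocated so the
+	 * AP never hands them out.
+	 */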
+ for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
+ i <= IPA_MEM_PART(v4_modem_rt_index_hi);
+ i++)
+ ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
+ IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
+
+ rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
+ IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
+ &mem);
+ if (rc) {
+ IPAERR("fail generate empty v4 rt img\n");
+ return rc;
+ }
+
+ v4_cmd.hash_rules_addr = mem.phys_base;
+ v4_cmd.hash_rules_size = mem.size;
+ v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_rt_hash_ofst);
+ v4_cmd.nhash_rules_addr = mem.phys_base;
+ v4_cmd.nhash_rules_size = mem.size;
+ v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_rt_nhash_ofst);
+ IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
+ v4_cmd.hash_local_addr);
+ IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
+ v4_cmd.nhash_local_addr);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct ip_v4_rt_init imm cmd\n");
+ rc = -EPERM;
+ goto free_mem;
+ }
+
+ desc.opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_ROUTING_INIT);
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa3_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+ ipahal_free_dma_mem(&mem);
+ return rc;
+}
+
+/**
+ * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_rt6_v3(void)
+{
+ struct ipa3_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ int i;
+ int rc = 0;
+
+ for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
+ i <= IPA_MEM_PART(v6_modem_rt_index_hi);
+ i++)
+ ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
+ IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
+
+ rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
+ IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
+ &mem);
+ if (rc) {
+ IPAERR("fail generate empty v6 rt img\n");
+ return rc;
+ }
+
+ v6_cmd.hash_rules_addr = mem.phys_base;
+ v6_cmd.hash_rules_size = mem.size;
+ v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_rt_hash_ofst);
+ v6_cmd.nhash_rules_addr = mem.phys_base;
+ v6_cmd.nhash_rules_size = mem.size;
+ v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_rt_nhash_ofst);
+ IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
+ v6_cmd.hash_local_addr);
+ IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
+ v6_cmd.nhash_local_addr);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct ip_v6_rt_init imm cmd\n");
+ rc = -EPERM;
+ goto free_mem;
+ }
+
+ desc.opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_ROUTING_INIT);
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa3_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+ ipahal_free_dma_mem(&mem);
+ return rc;
+}
+
+/**
+ * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_flt4_v3(void)
+{
+ struct ipa3_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ int rc;
+
+ rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+ IPA_MEM_PART(v4_flt_hash_size),
+ IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+ &mem);
+ if (rc) {
+ IPAERR("fail generate empty v4 flt img\n");
+ return rc;
+ }
+
+ v4_cmd.hash_rules_addr = mem.phys_base;
+ v4_cmd.hash_rules_size = mem.size;
+ v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_flt_hash_ofst);
+ v4_cmd.nhash_rules_addr = mem.phys_base;
+ v4_cmd.nhash_rules_size = mem.size;
+ v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_flt_nhash_ofst);
+ IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
+ v4_cmd.hash_local_addr);
+ IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
+ v4_cmd.nhash_local_addr);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct ip_v4_flt_init imm cmd\n");
+ rc = -EPERM;
+ goto free_mem;
+ }
+
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_FILTER_INIT);
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa3_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+ ipahal_free_dma_mem(&mem);
+ return rc;
+}
+
+/**
+ * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_flt6_v3(void)
+{
+ struct ipa3_desc desc = { 0 };
+ struct ipa_mem_buffer mem;
+ struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ int rc;
+
+ rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+ IPA_MEM_PART(v6_flt_hash_size),
+ IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+ &mem);
+ if (rc) {
+ IPAERR("fail generate empty v6 flt img\n");
+ return rc;
+ }
+
+ v6_cmd.hash_rules_addr = mem.phys_base;
+ v6_cmd.hash_rules_size = mem.size;
+ v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_flt_hash_ofst);
+ v6_cmd.nhash_rules_addr = mem.phys_base;
+ v6_cmd.nhash_rules_size = mem.size;
+ v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_flt_nhash_ofst);
+ IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
+ v6_cmd.hash_local_addr);
+ IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
+ v6_cmd.nhash_local_addr);
+
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct ip_v6_flt_init imm cmd\n");
+ rc = -EPERM;
+ goto free_mem;
+ }
+
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_FILTER_INIT);
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+ if (ipa3_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ }
+
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+ ipahal_free_dma_mem(&mem);
+ return rc;
+}
+
+static int ipa3_setup_flt_hash_tuple(void)
+{
+ int pipe_idx;
+ struct ipahal_reg_hash_tuple tuple;
+
+ memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
+
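+	/* Set a zeroed hash tuple mask on every AP-owned filtering pipe */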
+ for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
+ if (!ipa_is_ep_support_flt(pipe_idx))
+ continue;
+
+ if (ipa_is_modem_pipe(pipe_idx))
+ continue;
+
+ if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
+ IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int ipa3_setup_rt_hash_tuple(void)
+{
+ int tbl_idx;
+ struct ipahal_reg_hash_tuple tuple;
+
+ memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
+
+ for (tbl_idx = 0;
+ tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
+ IPA_MEM_PART(v4_rt_num_index));
+ tbl_idx++) {
+
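+		/* Leave modem-owned routing tables untouched */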
+ if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
+ tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
+ continue;
+
+ if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
+ tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
+ continue;
+
+ if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
+ IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int ipa3_setup_apps_pipes(void)
+{
+ struct ipa_sys_connect_params sys_in;
+ int result = 0;
+
+ if (ipa3_ctx->gsi_ch20_wa) {
+ IPADBG("Allocating GSI physical channel 20\n");
+ result = ipa_gsi_ch20_wa();
+ if (result) {
+ IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
+ goto fail_cmd;
+ }
+ }
+
+ /* CMD OUT (AP->IPA) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
+ if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_cmd;
+ }
+ IPADBG("Apps to IPA cmd pipe is connected\n");
+
+ ipa3_ctx->ctrl->ipa_init_sram();
+ IPADBG("SRAM initialized\n");
+
+ ipa3_ctx->ctrl->ipa_init_hdr();
+ IPADBG("HDR initialized\n");
+
+ ipa3_ctx->ctrl->ipa_init_rt4();
+ IPADBG("V4 RT initialized\n");
+
+ ipa3_ctx->ctrl->ipa_init_rt6();
+ IPADBG("V6 RT initialized\n");
+
+ ipa3_ctx->ctrl->ipa_init_flt4();
+ IPADBG("V4 FLT initialized\n");
+
+ ipa3_ctx->ctrl->ipa_init_flt6();
+ IPADBG("V6 FLT initialized\n");
+
+ if (ipa3_setup_flt_hash_tuple()) {
+ IPAERR(":fail to configure flt hash tuple\n");
+ result = -EPERM;
+ goto fail_schedule_delayed_work;
+ }
+ IPADBG("flt hash tuple is configured\n");
+
+ if (ipa3_setup_rt_hash_tuple()) {
+ IPAERR(":fail to configure rt hash tuple\n");
+ result = -EPERM;
+ goto fail_schedule_delayed_work;
+ }
+ IPADBG("rt hash tuple is configured\n");
+
+ if (ipa3_setup_exception_path()) {
+ IPAERR(":fail to setup excp path\n");
+ result = -EPERM;
+ goto fail_schedule_delayed_work;
+ }
+ IPADBG("Exception path was successfully set");
+
+ if (ipa3_setup_dflt_rt_tables()) {
+ IPAERR(":fail to setup dflt routes\n");
+ result = -EPERM;
+ goto fail_schedule_delayed_work;
+ }
+ IPADBG("default routing was set\n");
+
+ /* LAN IN (IPA->A5) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.notify = ipa3_lan_rx_cb;
+ sys_in.priv = NULL;
+ sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+ sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
+
+	/*
+	 * ipa3_lan_rx_cb() is intended to notify the source EP about a
+	 * packet being received on the LAN_CONS by calling the source EP
+	 * call-back. There could be a race condition with calling this
+	 * call-back: another thread may nullify it, e.g. on EP disconnect.
+	 * This lock is intended to protect the access to the source EP
+	 * call-back.
+	 */
+ spin_lock_init(&ipa3_ctx->disconnect_lock);
+ if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_schedule_delayed_work;
+ }
+
+ /* LAN-WAN OUT (AP->IPA) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_out)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_data_out;
+ }
+
+ return 0;
+
+fail_data_out:
+ ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+fail_schedule_delayed_work:
+ if (ipa3_ctx->dflt_v6_rt_rule_hdl)
+ __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
+ if (ipa3_ctx->dflt_v4_rt_rule_hdl)
+ __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
+ if (ipa3_ctx->excp_hdr_hdl)
+ __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl);
+ ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+fail_cmd:
+ return result;
+}
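+
+/*
+ * Bring-up order above: the APPS_CMD_PROD pipe is connected first, then
+ * the SRAM/HDR/RT/FLT tables are initialized, and only then are the
+ * LAN_CONS (exception/LAN RX) and LAN_WAN_PROD (AP TX) data pipes
+ * connected. ipa3_teardown_apps_pipes() below undoes this in reverse.
+ */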
+
+static void ipa3_teardown_apps_pipes(void)
+{
+ ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
+ ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+ __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
+ __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
+ __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl);
+ ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+}
+
+#ifdef CONFIG_COMPAT
+long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
+ struct ipa_ioc_nat_alloc_mem nat_mem;
+
+ switch (cmd) {
+ case IPA_IOC_ADD_HDR32:
+ cmd = IPA_IOC_ADD_HDR;
+ break;
+ case IPA_IOC_DEL_HDR32:
+ cmd = IPA_IOC_DEL_HDR;
+ break;
+ case IPA_IOC_ADD_RT_RULE32:
+ cmd = IPA_IOC_ADD_RT_RULE;
+ break;
+ case IPA_IOC_DEL_RT_RULE32:
+ cmd = IPA_IOC_DEL_RT_RULE;
+ break;
+ case IPA_IOC_ADD_FLT_RULE32:
+ cmd = IPA_IOC_ADD_FLT_RULE;
+ break;
+ case IPA_IOC_DEL_FLT_RULE32:
+ cmd = IPA_IOC_DEL_FLT_RULE;
+ break;
+ case IPA_IOC_GET_RT_TBL32:
+ cmd = IPA_IOC_GET_RT_TBL;
+ break;
+ case IPA_IOC_COPY_HDR32:
+ cmd = IPA_IOC_COPY_HDR;
+ break;
+ case IPA_IOC_QUERY_INTF32:
+ cmd = IPA_IOC_QUERY_INTF;
+ break;
+ case IPA_IOC_QUERY_INTF_TX_PROPS32:
+ cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
+ break;
+ case IPA_IOC_QUERY_INTF_RX_PROPS32:
+ cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
+ break;
+ case IPA_IOC_QUERY_INTF_EXT_PROPS32:
+ cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
+ break;
+ case IPA_IOC_GET_HDR32:
+ cmd = IPA_IOC_GET_HDR;
+ break;
+ case IPA_IOC_ALLOC_NAT_MEM32:
+ if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
+ sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
+ retval = -EFAULT;
+ goto ret;
+ }
+ memcpy(nat_mem.dev_name, nat_mem32.dev_name,
+ IPA_RESOURCE_NAME_MAX);
+ nat_mem.size = (size_t)nat_mem32.size;
+ nat_mem.offset = (off_t)nat_mem32.offset;
+
+ /* null terminate the string */
+ nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+ if (ipa3_allocate_nat_device(&nat_mem)) {
+ retval = -EFAULT;
+ goto ret;
+ }
+ nat_mem32.offset = (compat_off_t)nat_mem.offset;
+ if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
+ sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
+ retval = -EFAULT;
+ }
+ret:
+ return retval;
+ case IPA_IOC_V4_INIT_NAT32:
+ cmd = IPA_IOC_V4_INIT_NAT;
+ break;
+ case IPA_IOC_NAT_DMA32:
+ cmd = IPA_IOC_NAT_DMA;
+ break;
+ case IPA_IOC_V4_DEL_NAT32:
+ cmd = IPA_IOC_V4_DEL_NAT;
+ break;
+ case IPA_IOC_GET_NAT_OFFSET32:
+ cmd = IPA_IOC_GET_NAT_OFFSET;
+ break;
+ case IPA_IOC_PULL_MSG32:
+ cmd = IPA_IOC_PULL_MSG;
+ break;
+ case IPA_IOC_RM_ADD_DEPENDENCY32:
+ cmd = IPA_IOC_RM_ADD_DEPENDENCY;
+ break;
+ case IPA_IOC_RM_DEL_DEPENDENCY32:
+ cmd = IPA_IOC_RM_DEL_DEPENDENCY;
+ break;
+ case IPA_IOC_GENERATE_FLT_EQ32:
+ cmd = IPA_IOC_GENERATE_FLT_EQ;
+ break;
+ case IPA_IOC_QUERY_RT_TBL_INDEX32:
+ cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
+ break;
+ case IPA_IOC_WRITE_QMAPID32:
+ cmd = IPA_IOC_WRITE_QMAPID;
+ break;
+ case IPA_IOC_MDFY_FLT_RULE32:
+ cmd = IPA_IOC_MDFY_FLT_RULE;
+ break;
+ case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
+ cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
+ break;
+ case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
+ cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
+ break;
+ case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
+ cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
+ break;
+ case IPA_IOC_MDFY_RT_RULE32:
+ cmd = IPA_IOC_MDFY_RT_RULE;
+ break;
+ case IPA_IOC_COMMIT_HDR:
+ case IPA_IOC_RESET_HDR:
+ case IPA_IOC_COMMIT_RT:
+ case IPA_IOC_RESET_RT:
+ case IPA_IOC_COMMIT_FLT:
+ case IPA_IOC_RESET_FLT:
+ case IPA_IOC_DUMP:
+ case IPA_IOC_PUT_RT_TBL:
+ case IPA_IOC_PUT_HDR:
+ case IPA_IOC_SET_FLT:
+ case IPA_IOC_QUERY_EP_MAPPING:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
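+
+/*
+ * Illustrative sketch (assumed layout, not taken from this file): the
+ * 32-bit NAT-alloc struct translated above differs from
+ * struct ipa_ioc_nat_alloc_mem only in the width of its size/offset
+ * fields, roughly:
+ *
+ *	struct ipa3_ioc_nat_alloc_mem32 {
+ *		char dev_name[IPA_RESOURCE_NAME_MAX];
+ *		compat_size_t size;
+ *		compat_off_t offset;
+ *	};
+ *
+ * which is why IPA_IOC_ALLOC_NAT_MEM32 needs the field-by-field copy
+ * above, while most other commands only remap the command code and
+ * forward the compat pointer to ipa3_ioctl().
+ */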
+
+static ssize_t ipa3_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations ipa3_drv_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa3_open,
+ .read = ipa3_read,
+ .write = ipa3_write,
+ .unlocked_ioctl = ipa3_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_ipa3_ioctl,
+#endif
+};
+
+static int ipa3_get_clks(struct device *dev)
+{
+ ipa3_clk = clk_get(dev, "core_clk");
+ if (IS_ERR(ipa3_clk)) {
+ if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
+ IPAERR("fail to get ipa clk\n");
+ return PTR_ERR(ipa3_clk);
+ }
+ return 0;
+}
+
+/**
+ * _ipa_enable_clks_v3_0() - Enable IPA clocks.
+ */
+void _ipa_enable_clks_v3_0(void)
+{
+ IPADBG_LOW("enabling gcc_ipa_clk\n");
+ if (ipa3_clk) {
+ clk_prepare(ipa3_clk);
+ clk_enable(ipa3_clk);
+		IPADBG_LOW("curr_ipa_clk_rate=%d\n", ipa3_ctx->curr_ipa_clk_rate);
+ clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+ ipa3_uc_notify_clk_state(true);
+ } else {
+ WARN_ON(1);
+ }
+
+ ipa3_suspend_apps_pipes(false);
+}
+
+static unsigned int ipa3_get_bus_vote(void)
+{
+ unsigned int idx = 1;
+
+ if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs) {
+ idx = 1;
+ } else if (ipa3_ctx->curr_ipa_clk_rate ==
+ ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
+ if (ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2)
+ idx = 1;
+ else
+ idx = 2;
+ } else if (ipa3_ctx->curr_ipa_clk_rate ==
+ ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
+ idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
+ } else {
+ WARN_ON(1);
+ }
+
+ IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
+
+ return idx;
+}
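+
+/*
+ * Example of the vote-index mapping above, assuming a hypothetical bus
+ * scale table with 4 usecases (index 0 = no vote): SVS -> 1,
+ * NOMINAL -> 2 and TURBO -> num_usecases - 1 = 3. With a 2-usecase
+ * table, every active rate collapses to index 1.
+ */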
+
+/**
+* ipa3_enable_clks() - Turn on IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa3_enable_clks(void)
+{
+ IPADBG("enabling IPA clocks and bus voting\n");
+
+ ipa3_ctx->ctrl->ipa3_enable_clks();
+
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+ if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
+ ipa3_get_bus_vote()))
+ WARN_ON(1);
+}
+
+
+/**
+ * _ipa_disable_clks_v3_0() - Disable IPA clocks.
+ */
+void _ipa_disable_clks_v3_0(void)
+{
+ IPADBG_LOW("disabling gcc_ipa_clk\n");
+ ipa3_suspend_apps_pipes(true);
+ ipa3_uc_notify_clk_state(false);
+ if (ipa3_clk)
+ clk_disable_unprepare(ipa3_clk);
+ else
+ WARN_ON(1);
+}
+
+/**
+* ipa3_disable_clks() - Turn off IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa3_disable_clks(void)
+{
+ IPADBG("disabling IPA clocks and bus voting\n");
+
+ ipa3_ctx->ctrl->ipa3_disable_clks();
+
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+ if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
+ 0))
+ WARN_ON(1);
+}
+
+/**
+ * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
+ *
+ * This function is called prior to clock gating when the active client
+ * counter is 1. The TAG process ensures that there are no packets inside
+ * the IPA HW that were not submitted to the peer's BAM. During the TAG
+ * process all aggregation frames are (force) closed.
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_start_tag_process(struct work_struct *work)
+{
+ int res;
+
+ IPADBG("starting TAG process\n");
+ /* close aggregation frames on all pipes */
+ res = ipa3_tag_aggr_force_close(-1);
+ if (res)
+ IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
+
+ IPADBG("TAG process done\n");
+}
+
+/**
+* ipa3_active_clients_log_mod() - Log a modification in the active clients
+* reference count
+*
+* This method logs any modification in the active clients reference count.
+* It records the modification both in the circular history buffer and in
+* the hash table - looking up an entry, creating one if needed and
+* deleting one if needed.
+*
+* @id: ipa3_active client logging info struct to hold the log information
+* @inc: a boolean variable to indicate whether the modification is an increase
+* or decrease
+* @int_ctx: a boolean variable to indicate whether this call is being made from
+* an interrupt context and therefore should allocate GFP_ATOMIC memory
+*
+* Method process:
+* - Hash the unique identifier string
+* - Look the hash up in the table:
+* 1) If found, increase or decrease the reference count
+* 2) If not found, allocate a new hash table entry struct and initialize it
+* - Remove and deallocate the entry once its count drops back to zero
+* - Log the call in the circular history buffer (unless it is a simple call)
+*/
+void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
+ bool inc, bool int_ctx)
+{
+ char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
+ unsigned long long t;
+ unsigned long nanosec_rem;
+ struct ipa3_active_client_htable_entry *hentry;
+ struct ipa3_active_client_htable_entry *hfound;
+ u32 hkey;
+ char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+
+ hfound = NULL;
+ memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ hkey = arch_fast_hash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
+ 0);
+ hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
+ hentry, list, hkey) {
+ if (!strcmp(hentry->id_string, id->id_string)) {
+ hentry->count = hentry->count + (inc ? 1 : -1);
+ hfound = hentry;
+ }
+ }
+ if (hfound == NULL) {
+		hentry = kzalloc(sizeof(
+				struct ipa3_active_client_htable_entry),
+				int_ctx ? GFP_ATOMIC : GFP_KERNEL);
+ if (hentry == NULL) {
+			IPAERR("failed allocating active clients hash entry\n");
+ return;
+ }
+ hentry->type = id->type;
+ strlcpy(hentry->id_string, id->id_string,
+ IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ INIT_HLIST_NODE(&hentry->list);
+ hentry->count = inc ? 1 : -1;
+ hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
+ &hentry->list, hkey);
+ } else if (hfound->count == 0) {
+ hash_del(&hfound->list);
+ kfree(hfound);
+ }
+
+ if (id->type != SIMPLE) {
+ t = local_clock();
+ nanosec_rem = do_div(t, 1000000000) / 1000;
+ snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
+ inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
+ "[%5lu.%06lu] v %s, %s: %d",
+ (unsigned long)t, nanosec_rem,
+ id->id_string, id->file, id->line);
+ ipa3_active_clients_log_insert(temp_str);
+ }
+}
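+
+/*
+ * Resulting history lines look like the following (values and file name
+ * are illustrative only):
+ *
+ *	[   42.001234] ^ IPA_CLIENT_USB_CONS, ipa3_clients.c: 310
+ *	[   42.004321] v IPA_CLIENT_USB_CONS, ipa3_clients.c: 344
+ *
+ * where '^' marks an increment and 'v' a decrement, followed by the id
+ * string and the file:line of the caller.
+ */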
+
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+ bool int_ctx)
+{
+ ipa3_active_clients_log_mod(id, false, int_ctx);
+}
+
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+ bool int_ctx)
+{
+ ipa3_active_clients_log_mod(id, true, int_ctx);
+}
+
+/**
+* ipa3_inc_client_enable_clks() - Increase active clients counter, and
+* enable ipa clocks if necessary
+*
+* Return codes:
+* None
+*/
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+ ipa3_active_clients_lock();
+ ipa3_active_clients_log_inc(id, false);
+ ipa3_ctx->ipa3_active_clients.cnt++;
+ if (ipa3_ctx->ipa3_active_clients.cnt == 1)
+ ipa3_enable_clks();
+ IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+ ipa3_active_clients_unlock();
+}
+
+/**
+* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
+* clients if no asynchronous actions are needed. Asynchronous actions are
+* locking a mutex and waking up IPA HW.
+*
+* Return codes: 0 for success
+* -EPERM if an asynchronous action should have been done
+*/
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+ *id)
+{
+ int res = 0;
+ unsigned long flags;
+
+ if (ipa3_active_clients_trylock(&flags) == 0)
+ return -EPERM;
+
+ if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
+ res = -EPERM;
+ goto bail;
+ }
+ ipa3_active_clients_log_inc(id, true);
+ ipa3_ctx->ipa3_active_clients.cnt++;
+ IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+bail:
+ ipa3_active_clients_trylock_unlock(&flags);
+
+ return res;
+}
+
+/**
+ * ipa3_dec_client_disable_clks() - Decrease active clients counter
+ *
+ * In case there are no active clients this function also starts the
+ * TAG process. When the TAG process ends, ipa clocks will be gated.
+ * The start_tag_process_again flag is set during this function to signal the
+ * TAG process to start again, as another client may have sent data to ipa.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+ struct ipa_active_client_logging_info log_info;
+
+ ipa3_active_clients_lock();
+ ipa3_active_clients_log_dec(id, false);
+ ipa3_ctx->ipa3_active_clients.cnt--;
+ IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+ if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
+ if (ipa3_ctx->tag_process_before_gating) {
+ ipa3_ctx->tag_process_before_gating = false;
+ /*
+ * When TAG process ends, active clients will be
+ * decreased
+ */
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
+ "TAG_PROCESS");
+ ipa3_active_clients_log_inc(&log_info, false);
+ ipa3_ctx->ipa3_active_clients.cnt = 1;
+ queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
+ } else {
+ ipa3_disable_clks();
+ }
+ }
+ ipa3_active_clients_unlock();
+}
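+
+/*
+ * Typical caller pattern for the inc/dec votes above (hypothetical
+ * caller, shown only for illustration):
+ *
+ *	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ *	... access IPA HW ...
+ *	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ *
+ * The first vote ungates the IPA clocks; removing the last vote queues
+ * the TAG process (if needed) and then gates the clocks.
+ */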
+
+/**
+* ipa3_inc_acquire_wakelock() - Increase the wakelock reference counter, and
+* acquire the wakelock if necessary
+*
+* Return codes:
+* None
+*/
+void ipa3_inc_acquire_wakelock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+ ipa3_ctx->wakelock_ref_cnt.cnt++;
+ if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
+ __pm_stay_awake(&ipa3_ctx->w_lock);
+ IPADBG_LOW("active wakelock ref cnt = %d\n",
+ ipa3_ctx->wakelock_ref_cnt.cnt);
+ spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+/**
+ * ipa3_dec_release_wakelock() - Decrease the wakelock reference counter
+ *
+ * If the ref count reaches 0, the wakelock is released.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_release_wakelock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+ ipa3_ctx->wakelock_ref_cnt.cnt--;
+ IPADBG_LOW("active wakelock ref cnt = %d\n",
+ ipa3_ctx->wakelock_ref_cnt.cnt);
+ if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
+ __pm_relax(&ipa3_ctx->w_lock);
+ spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+ u32 bandwidth_mbps)
+{
+ enum ipa_voltage_level needed_voltage;
+ u32 clk_rate;
+
+	IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u\n",
+ floor_voltage, bandwidth_mbps);
+
+ if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
+ floor_voltage >= IPA_VOLTAGE_MAX) {
+ IPAERR("bad voltage\n");
+ return -EINVAL;
+ }
+
+ if (ipa3_ctx->enable_clock_scaling) {
+ IPADBG_LOW("Clock scaling is enabled\n");
+ if (bandwidth_mbps >=
+ ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
+ needed_voltage = IPA_VOLTAGE_TURBO;
+ else if (bandwidth_mbps >=
+ ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
+ needed_voltage = IPA_VOLTAGE_NOMINAL;
+ else
+ needed_voltage = IPA_VOLTAGE_SVS;
+ } else {
+ IPADBG_LOW("Clock scaling is disabled\n");
+ needed_voltage = IPA_VOLTAGE_NOMINAL;
+ }
+
+ needed_voltage = max(needed_voltage, floor_voltage);
+ switch (needed_voltage) {
+ case IPA_VOLTAGE_SVS:
+ clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
+ break;
+ case IPA_VOLTAGE_NOMINAL:
+ clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
+ break;
+ case IPA_VOLTAGE_TURBO:
+ clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
+ break;
+ default:
+ IPAERR("bad voltage\n");
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
+ IPADBG_LOW("Same voltage\n");
+ return 0;
+ }
+
+ ipa3_active_clients_lock();
+ ipa3_ctx->curr_ipa_clk_rate = clk_rate;
+ IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
+ if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
+ clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+ if (msm_bus_scale_client_update_request(
+ ipa3_ctx->ipa_bus_hdl, ipa3_get_bus_vote()))
+ WARN_ON(1);
+ } else {
+ IPADBG_LOW("clocks are gated, not setting rate\n");
+ }
+ ipa3_active_clients_unlock();
+ IPADBG_LOW("Done\n");
+ return 0;
+}
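+
+/*
+ * Worked example for the voltage selection above (the threshold values
+ * are platform data; the numbers here are illustrative only): with
+ * clock_scaling_bw_threshold_nominal = 600 and
+ * clock_scaling_bw_threshold_turbo = 1000, a request of
+ * bandwidth_mbps = 800 with floor_voltage = IPA_VOLTAGE_SVS resolves to
+ * IPA_VOLTAGE_NOMINAL and therefore to ipa_clk_rate_nominal.
+ */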
+
+static void ipa3_sps_process_irq_schedule_rel(void)
+{
+ queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+ &ipa3_sps_release_resource_work,
+ msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
+}
+
+/**
+* ipa3_suspend_handler() - Handles the suspend interrupt:
+* wakes up the suspended peripheral by requesting its consumer
+* @interrupt: Interrupt type
+* @private_data: The client's private data
+* @interrupt_data: Interrupt specific information data
+*/
+void ipa3_suspend_handler(enum ipa_irq_type interrupt,
+ void *private_data,
+ void *interrupt_data)
+{
+ enum ipa_rm_resource_name resource;
+ u32 suspend_data =
+ ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
+ u32 bmsk = 1;
+ u32 i = 0;
+ int res;
+ struct ipa_ep_cfg_holb holb_cfg;
+
+ IPADBG("interrupt=%d, interrupt_data=%u\n",
+ interrupt, suspend_data);
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.tmr_val = 0;
+
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
+ if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
+ /*
+ * pipe will be unsuspended as part of
+ * enabling IPA clocks
+ */
+ if (!atomic_read(
+ &ipa3_ctx->transport_pm.dec_clients)
+ ) {
+ IPA_ACTIVE_CLIENTS_INC_EP(
+ ipa3_ctx->ep[i].client);
+ IPADBG_LOW("Pipes un-suspended.\n");
+ IPADBG_LOW("Enter poll mode.\n");
+ atomic_set(
+ &ipa3_ctx->transport_pm.dec_clients,
+ 1);
+ ipa3_sps_process_irq_schedule_rel();
+ }
+ } else {
+ resource = ipa3_get_rm_resource_from_ep(i);
+ res =
+ ipa_rm_request_resource_with_timer(resource);
+ if (res == -EPERM &&
+ IPA_CLIENT_IS_CONS(
+ ipa3_ctx->ep[i].client)) {
+ holb_cfg.en = 1;
+ res = ipa3_cfg_ep_holb_by_client(
+ ipa3_ctx->ep[i].client, &holb_cfg);
+ if (res) {
+ IPAERR("holb en fail, stall\n");
+ BUG();
+ }
+ }
+ }
+ }
+ bmsk = bmsk << 1;
+ }
+}
+
+/**
+* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
+* as it was registered in the IPA init sequence.
+* Return codes:
+* 0: success
+* -EPERM: failed to remove current handler or failed to add original handler
+*/
+int ipa3_restore_suspend_handler(void)
+{
+ int result = 0;
+
+ result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+ if (result) {
+ IPAERR("remove handler for suspend interrupt failed\n");
+ return -EPERM;
+ }
+
+ result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+ ipa3_suspend_handler, false, NULL);
+	if (result) {
+		IPAERR("register handler for suspend interrupt failed\n");
+		return -EPERM;
+	}
+
+	IPADBG("suspend handler successfully restored\n");
+
+ return result;
+}
+
+static int ipa3_apps_cons_release_resource(void)
+{
+ return 0;
+}
+
+static int ipa3_apps_cons_request_resource(void)
+{
+ return 0;
+}
+
+static void ipa3_sps_release_resource(struct work_struct *work)
+{
+ /* check whether still need to decrease client usage */
+ if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
+ if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
+ IPADBG("EOT pending Re-scheduling\n");
+ ipa3_sps_process_irq_schedule_rel();
+ } else {
+ atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
+ }
+ }
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+}
+
+int ipa3_create_apps_resource(void)
+{
+ struct ipa_rm_create_params apps_cons_create_params;
+ struct ipa_rm_perf_profile profile;
+ int result = 0;
+
+ memset(&apps_cons_create_params, 0,
+ sizeof(apps_cons_create_params));
+ apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
+ apps_cons_create_params.request_resource =
+ ipa3_apps_cons_request_resource;
+ apps_cons_create_params.release_resource =
+ ipa3_apps_cons_release_resource;
+ result = ipa_rm_create_resource(&apps_cons_create_params);
+ if (result) {
+ IPAERR("ipa_rm_create_resource failed\n");
+ return result;
+ }
+
+ profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+ ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
+
+ return result;
+}
+
+/**
+ * ipa3_init_interrupts() - Register the IPA IRQ handlers
+ *
+ * Return codes: 0 on success, negative on failure
+ *
+ */
+int ipa3_init_interrupts(void)
+{
+ int result;
+
+ /*register IPA IRQ handler*/
+ result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
+ master_dev);
+ if (result) {
+ IPAERR("ipa interrupts initialization failed\n");
+ return -ENODEV;
+ }
+
+ /*add handler for suspend interrupt*/
+ result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+ ipa3_suspend_handler, false, NULL);
+ if (result) {
+ IPAERR("register handler for suspend interrupt failed\n");
+ result = -ENODEV;
+ goto fail_add_interrupt_handler;
+ }
+
+ return 0;
+
+fail_add_interrupt_handler:
+ free_irq(ipa3_res.ipa_irq, master_dev);
+ return result;
+}
+
+/**
+ * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
+ * The idr structure per filtering table is used for rule id generation
+ * per filtering rule.
+ */
+static void ipa3_destroy_flt_tbl_idrs(void)
+{
+ int i;
+ struct ipa3_flt_tbl *flt_tbl;
+
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (!ipa_is_ep_support_flt(i))
+ continue;
+
+ flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+ idr_destroy(&flt_tbl->rule_ids);
+ flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+ idr_destroy(&flt_tbl->rule_ids);
+ }
+}
+
+static void ipa3_freeze_clock_vote_and_notify_modem(void)
+{
+ int res;
+ u32 ipa_clk_state;
+ struct ipa_active_client_logging_info log_info;
+
+ if (ipa3_ctx->smp2p_info.res_sent)
+ return;
+
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
+ res = ipa3_inc_client_enable_clks_no_block(&log_info);
+ if (res)
+ ipa_clk_state = 0;
+ else
+ ipa_clk_state = 1;
+
+ if (ipa3_ctx->smp2p_info.out_base_id) {
+ gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
+ IPA_GPIO_OUT_CLK_VOTE_IDX, ipa_clk_state);
+ gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
+ IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);
+ ipa3_ctx->smp2p_info.res_sent = true;
+ } else {
+ IPAERR("smp2p out gpio not assigned\n");
+ }
+
+ IPADBG("IPA clocks are %s\n", ipa_clk_state ? "ON" : "OFF");
+}
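+
+/*
+ * smp2p signalling summary for the function above: one outbound GPIO
+ * (IPA_GPIO_OUT_CLK_VOTE_IDX) carries the frozen clock state and a
+ * second one (IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX) is raised to tell the
+ * modem that the state is valid; res_sent guards against re-sending.
+ */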
+
+static int ipa3_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ int res;
+
+ ipa3_freeze_clock_vote_and_notify_modem();
+
+ IPADBG("Calling uC panic handler\n");
+ res = ipa3_uc_panic_notifier(this, event, ptr);
+ if (res)
+ IPAERR("uC panic handler failed %d\n", res);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_panic_blk = {
+ .notifier_call = ipa3_panic_notifier,
+ /* IPA panic handler needs to run before modem shuts down */
+ .priority = INT_MAX,
+};
+
+static void ipa3_register_panic_hdlr(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ipa3_panic_blk);
+}
+
+static void ipa3_trigger_ipa_ready_cbs(void)
+{
+ struct ipa3_ready_cb_info *info;
+
+ mutex_lock(&ipa3_ctx->lock);
+
+ /* Call all the CBs */
+ list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
+ if (info->ready_cb)
+ info->ready_cb(info->user_data);
+
+ mutex_unlock(&ipa3_ctx->lock);
+}
+
+static int ipa3_gsi_pre_fw_load_init(void)
+{
+ int result;
+
+ result = gsi_configure_regs(ipa3_res.transport_mem_base,
+ ipa3_res.transport_mem_size,
+ ipa3_res.ipa_mem_base);
+ if (result) {
+ IPAERR("Failed to configure GSI registers\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa3_post_init() - Initialize the IPA Driver (Part II).
+ * This part contains all initialization which requires interaction with
+ * IPA HW (via SPS BAM or GSI).
+ *
+ * @resource_p: contains platform-specific values from the DTS file
+ * @ipa_dev: the device structure representing the IPA driver
+ *
+ * Function initialization process:
+ * - Register BAM/SPS or GSI
+ * - Setup APPS pipes
+ * - Initialize tethering bridge
+ * - Initialize IPA debugfs
+ * - Initialize IPA uC interface
+ * - Initialize WDI interface
+ * - Initialize USB interface
+ * - Register for panic handler
+ * - Trigger IPA ready callbacks (to all subscribers)
+ * - Trigger IPA completion object (to all who wait on it)
+ */
+static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
+ struct device *ipa_dev)
+{
+ int result;
+ struct sps_bam_props bam_props = { 0 };
+ struct gsi_per_props gsi_props;
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ memset(&gsi_props, 0, sizeof(gsi_props));
+ gsi_props.ee = resource_p->ee;
+ gsi_props.intr = GSI_INTR_IRQ;
+ gsi_props.irq = resource_p->transport_irq;
+ gsi_props.phys_addr = resource_p->transport_mem_base;
+ gsi_props.size = resource_p->transport_mem_size;
+ gsi_props.notify_cb = ipa_gsi_notify_cb;
+ gsi_props.req_clk_cb = NULL;
+ gsi_props.rel_clk_cb = NULL;
+
+ result = gsi_register_device(&gsi_props,
+ &ipa3_ctx->gsi_dev_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR(":gsi register error - %d\n", result);
+ result = -ENODEV;
+ goto fail_register_device;
+ }
+ IPADBG("IPA gsi is registered\n");
+ } else {
+ /* register IPA with SPS driver */
+ bam_props.phys_addr = resource_p->transport_mem_base;
+ bam_props.virt_size = resource_p->transport_mem_size;
+ bam_props.irq = resource_p->transport_irq;
+ bam_props.num_pipes = ipa3_ctx->ipa_num_pipes;
+ bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+ bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+ bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+ bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP;
+		if (ipa3_ctx->ipa_bam_remote_mode)
+ bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
+ if (!ipa3_ctx->smmu_s1_bypass)
+ bam_props.options |= SPS_BAM_SMMU_EN;
+ bam_props.ee = resource_p->ee;
+ bam_props.ipc_loglevel = 3;
+
+ result = sps_register_bam_device(&bam_props,
+ &ipa3_ctx->bam_handle);
+ if (result) {
+ IPAERR(":bam register error - %d\n", result);
+ result = -EPROBE_DEFER;
+ goto fail_register_device;
+ }
+ IPADBG("IPA BAM is registered\n");
+ }
+
+ /* setup the AP-IPA pipes */
+ if (ipa3_setup_apps_pipes()) {
+ IPAERR(":failed to setup IPA-Apps pipes\n");
+ result = -ENODEV;
+ goto fail_setup_apps_pipes;
+ }
+ IPADBG("IPA System2Bam pipes were connected\n");
+
+ if (ipa3_ctx->use_ipa_teth_bridge) {
+ /* Initialize the tethering bridge driver */
+ result = ipa3_teth_bridge_driver_init();
+ if (result) {
+ IPAERR(":teth_bridge init failed (%d)\n", -result);
+ result = -ENODEV;
+ goto fail_teth_bridge_driver_init;
+ }
+		IPADBG("teth_bridge initialized\n");
+ }
+
+ ipa3_debugfs_init();
+
+ result = ipa3_uc_interface_init();
+ if (result)
+ IPAERR(":ipa Uc interface init failed (%d)\n", -result);
+ else
+ IPADBG(":ipa Uc interface init ok\n");
+
+ result = ipa3_wdi_init();
+ if (result)
+ IPAERR(":wdi init failed (%d)\n", -result);
+ else
+ IPADBG(":wdi init ok\n");
+
+ result = ipa3_ntn_init();
+ if (result)
+ IPAERR(":ntn init failed (%d)\n", -result);
+ else
+ IPADBG(":ntn init ok\n");
+
+ ipa3_register_panic_hdlr();
+
+ ipa3_ctx->q6_proxy_clk_vote_valid = true;
+
+ mutex_lock(&ipa3_ctx->lock);
+ ipa3_ctx->ipa_initialization_complete = true;
+ mutex_unlock(&ipa3_ctx->lock);
+
+ ipa3_trigger_ipa_ready_cbs();
+ complete_all(&ipa3_ctx->init_completion_obj);
+ pr_info("IPA driver initialization was successful.\n");
+
+ return 0;
+
+fail_teth_bridge_driver_init:
+ ipa3_teardown_apps_pipes();
+fail_setup_apps_pipes:
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+ gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
+ else
+ sps_deregister_bam_device(ipa3_ctx->bam_handle);
+fail_register_device:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+ ipa_rm_exit();
+ cdev_del(&ipa3_ctx->cdev);
+ device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
+ unregister_chrdev_region(ipa3_ctx->dev_num, 1);
+ if (ipa3_ctx->pipe_mem_pool)
+ gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
+ ipa3_destroy_flt_tbl_idrs();
+ idr_destroy(&ipa3_ctx->ipa_idr);
+ kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
+ kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
+ kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
+ kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
+ kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
+ kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
+ kmem_cache_destroy(ipa3_ctx->hdr_cache);
+ kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
+ kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
+ destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
+ destroy_workqueue(ipa3_ctx->power_mgmt_wq);
+ iounmap(ipa3_ctx->mmio);
+ ipa3_disable_clks();
+ msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
+ if (ipa3_bus_scale_table) {
+ msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
+ ipa3_bus_scale_table = NULL;
+ }
+ kfree(ipa3_ctx->ctrl);
+ kfree(ipa3_ctx);
+ ipa3_ctx = NULL;
+ return result;
+}
+
+static int ipa3_trigger_fw_loading_mdms(void)
+{
+ int result;
+ const struct firmware *fw;
+
+ IPADBG("FW loading process initiated\n");
+
+ result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->dev);
+ if (result < 0) {
+ IPAERR("request_firmware failed, error %d\n", result);
+ return result;
+ }
+ if (fw == NULL) {
+ IPAERR("Firmware is NULL!\n");
+ return -EINVAL;
+ }
+
+ IPADBG("FWs are available for loading\n");
+
+ result = ipa3_load_fws(fw);
+ if (result) {
+ IPAERR("IPA FWs loading has failed\n");
+ release_firmware(fw);
+ return result;
+ }
+
+ result = gsi_enable_fw(ipa3_res.transport_mem_base,
+ ipa3_res.transport_mem_size);
+ if (result) {
+ IPAERR("Failed to enable GSI FW\n");
+ release_firmware(fw);
+ return result;
+ }
+
+ release_firmware(fw);
+
+ IPADBG("FW loading process is complete\n");
+ return 0;
+}
+
+static int ipa3_trigger_fw_loading_msms(void)
+{
+ void *subsystem_get_retval = NULL;
+
+ IPADBG("FW loading process initiated\n");
+
+ subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
+ if (IS_ERR_OR_NULL(subsystem_get_retval)) {
+ IPAERR("Unable to trigger PIL process for FW loading\n");
+ return -EINVAL;
+ }
+
+ IPADBG("FW loading process is complete\n");
+ return 0;
+}
+
+static ssize_t ipa3_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ int result = -EINVAL;
+
+ char dbg_buff[16] = { 0 };
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+
+ if (missing) {
+ IPAERR("Unable to copy data from user\n");
+ return -EFAULT;
+ }
+
+ /* Prevent consequent calls from trying to load the FW again. */
+ if (ipa3_is_ready())
+ return count;
+
+ /*
+ * We will trigger the process only if we're in GSI mode, otherwise,
+ * we just ignore the write.
+ */
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ if (ipa3_is_msm_device())
+ result = ipa3_trigger_fw_loading_msms();
+ else
+ result = ipa3_trigger_fw_loading_mdms();
+		/* all IPAv3.x chipsets support FW loading */
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ if (result) {
+ IPAERR("FW loading process has failed\n");
+ BUG();
+ } else
+ ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+ }
+ return count;
+}
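+
+/*
+ * Userspace kicks off FW loading with a plain write to the IPA char
+ * device, e.g. (illustrative; assumes the node created from DRV_NAME is
+ * /dev/ipa):
+ *
+ *	echo 1 > /dev/ipa
+ *
+ * The written content is not interpreted; writes after the driver is
+ * ready are simply ignored.
+ */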
+
+/**
+* ipa3_pre_init() - Initialize the IPA Driver.
+* This part contains all initialization which doesn't require IPA HW, such
+* as structure allocations and initializations, register writes, etc.
+*
+* @resource_p: contains platform-specific values from the DTS file
+* @ipa_dev: the device structure representing the IPA driver
+*
+* Function initialization process:
+* - Allocate memory for the driver context data struct
+* - Initialize the ipa3_ctx with:
+* 1) parsed values from the dts file
+* 2) parameters passed to the module initialization
+* 3) read HW values (such as core memory size)
+* - Map IPA core registers to CPU memory
+* - Restart IPA core (HW reset)
+* - Set configuration for IPA BAM via BAM_CNFG_BITS
+* - Initialize the look-aside caches (kmem_cache/slab) for filter,
+* routing and IPA-tree
+* - Create a memory pool with 4 objects for DMA operations (each object
+* is 512 bytes long); these objects will be used for tx (A5->IPA)
+* - Initialize list heads (routing, filter, hdr, system pipes)
+* - Initialize mutexes (for ipa_ctx and NAT memory mutexes)
+* - Initialize spinlocks (for lists related to A5<->IPA pipes)
+* - Initialize 2 single-threaded work-queues named "ipa rx wq" and "ipa tx wq"
+* - Initialize Red-Black-Tree(s) for handles of header, routing rule,
+* routing table and filtering rule
+* - Initialize the filter block by committing IPV4 and IPV6 default rules
+* - Create empty routing table in system memory (no committing)
+* - Initialize pipes memory pool with ipa3_pipe_mem_init for supported platforms
+* - Create a char-device for IPA
+* - Initialize IPA RM (resource manager)
+* - Configure GSI registers (in GSI case)
+*/
+static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
+ struct device *ipa_dev)
+{
+ int result = 0;
+ int i;
+ struct ipa3_flt_tbl *flt_tbl;
+ struct ipa3_rt_tbl_set *rset;
+ struct ipa_active_client_logging_info log_info;
+
+ IPADBG("IPA Driver initialization started\n");
+
+ ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
+ if (!ipa3_ctx) {
+ IPAERR(":kzalloc err.\n");
+ result = -ENOMEM;
+ goto fail_mem_ctx;
+ }
+
+ ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
+ if (ipa3_ctx->logbuf == NULL) {
+ IPAERR("failed to get logbuf\n");
+ result = -ENOMEM;
+ goto fail_logbuf;
+ }
+
+ ipa3_ctx->pdev = ipa_dev;
+ ipa3_ctx->uc_pdev = ipa_dev;
+ ipa3_ctx->smmu_present = smmu_info.present;
+ if (!ipa3_ctx->smmu_present)
+ ipa3_ctx->smmu_s1_bypass = true;
+ else
+ ipa3_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
+ ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+ ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
+ ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
+ ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
+ ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
+ ipa3_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
+ ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
+ ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
+ ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
+ ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
+ ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
+ ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
+ ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
+ ipa3_ctx->transport_prototype = resource_p->transport_prototype;
+ ipa3_ctx->ee = resource_p->ee;
+ ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
+ ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
+ ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
+
+ /* default aggregation parameters */
+ ipa3_ctx->aggregation_type = IPA_MBIM_16;
+ ipa3_ctx->aggregation_byte_limit = 1;
+ ipa3_ctx->aggregation_time_limit = 0;
+
+ ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
+ if (!ipa3_ctx->ctrl) {
+ IPAERR("memory allocation error for ctrl\n");
+ result = -ENOMEM;
+ goto fail_mem_ctrl;
+ }
+ result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
+ ipa3_ctx->ipa_hw_type);
+ if (result) {
+ IPAERR("fail to static bind IPA ctrl.\n");
+ result = -EFAULT;
+ goto fail_bind;
+ }
+
+ result = ipa3_init_mem_partition(master_dev->of_node);
+ if (result) {
+ IPAERR(":ipa3_init_mem_partition failed!\n");
+ result = -ENODEV;
+ goto fail_init_mem_partition;
+ }
+
+ if (ipa3_bus_scale_table) {
+ IPADBG("Use bus scaling info from device tree\n");
+ ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table;
+ }
+
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL) {
+ /* get BUS handle */
+ ipa3_ctx->ipa_bus_hdl =
+ msm_bus_scale_register_client(
+ ipa3_ctx->ctrl->msm_bus_data_ptr);
+ if (!ipa3_ctx->ipa_bus_hdl) {
+ IPAERR("fail to register with bus mgr!\n");
+ result = -ENODEV;
+ goto fail_bus_reg;
+ }
+ } else {
+ IPADBG("Skipping bus scaling registration on Virtual plat\n");
+ }
+
+ /* get IPA clocks */
+ result = ipa3_get_clks(master_dev);
+ if (result)
+ goto fail_clk;
+
+ /* init active_clients_log after getting ipa-clk */
+ if (ipa3_active_clients_log_init())
+ goto fail_init_active_client;
+
+ /* Enable ipa3_ctx->enable_clock_scaling */
+ ipa3_ctx->enable_clock_scaling = 1;
+ ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
+
+ /* enable IPA clocks explicitly to allow the initialization */
+ ipa3_enable_clks();
+
+ /* setup IPA register access */
+ IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
+ ipa3_ctx->ctrl->ipa_reg_base_ofst);
+ ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
+ ipa3_ctx->ctrl->ipa_reg_base_ofst,
+ resource_p->ipa_mem_size);
+ if (!ipa3_ctx->mmio) {
+ IPAERR(":ipa-base ioremap err.\n");
+ result = -EFAULT;
+ goto fail_remap;
+ }
+
+ if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
+ ipa3_ctx->pdev)) {
+ IPAERR("fail to init ipahal\n");
+ result = -EFAULT;
+ goto fail_ipahal;
+ }
+
+ result = ipa3_init_hw();
+ if (result) {
+ IPAERR(":error initializing HW.\n");
+ result = -ENODEV;
+ goto fail_init_hw;
+ }
+	IPADBG("IPA HW initialization sequence completed\n");
+
+ ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
+ if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
+		IPAERR("IPA has more pipes than supported! has %d, max %d\n",
+ ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
+ result = -ENODEV;
+ goto fail_init_hw;
+ }
+
+ ipa_init_ep_flt_bitmap();
+ IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
+ ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
+
+ ipa3_ctx->ctrl->ipa_sram_read_settings();
+ IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
+ ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
+
+ IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
+ ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
+ ipa3_ctx->ip4_rt_tbl_nhash_lcl);
+
+ IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
+ ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
+
+ IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
+ ipa3_ctx->ip4_flt_tbl_hash_lcl,
+ ipa3_ctx->ip4_flt_tbl_nhash_lcl);
+
+ IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
+ ipa3_ctx->ip6_flt_tbl_hash_lcl,
+ ipa3_ctx->ip6_flt_tbl_nhash_lcl);
+
+ if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
+		IPAERR("SW expects more core memory, needed %d, avail %d\n",
+ ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
+ result = -ENOMEM;
+ goto fail_init_hw;
+ }
+
+ mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
+ spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
+ ipa3_active_clients_log_inc(&log_info, false);
+ ipa3_ctx->ipa3_active_clients.cnt = 1;
+
+ /* Assign resource limitation to each group */
+ ipa3_set_resorce_groups_min_max_limits();
+
+ /* Create workqueues for power management */
+ ipa3_ctx->power_mgmt_wq =
+ create_singlethread_workqueue("ipa_power_mgmt");
+ if (!ipa3_ctx->power_mgmt_wq) {
+ IPAERR("failed to create power mgmt wq\n");
+ result = -ENOMEM;
+ goto fail_init_hw;
+ }
+
+ ipa3_ctx->transport_power_mgmt_wq =
+ create_singlethread_workqueue("transport_power_mgmt");
+ if (!ipa3_ctx->transport_power_mgmt_wq) {
+ IPAERR("failed to create transport power mgmt wq\n");
+ result = -ENOMEM;
+ goto fail_create_transport_wq;
+ }
+
+ spin_lock_init(&ipa3_ctx->transport_pm.lock);
+ ipa3_ctx->transport_pm.res_granted = false;
+ ipa3_ctx->transport_pm.res_rel_in_prog = false;
+
+ /* init the lookaside cache */
+ ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
+ sizeof(struct ipa3_flt_entry), 0, 0, NULL);
+ if (!ipa3_ctx->flt_rule_cache) {
+ IPAERR(":ipa flt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_flt_rule_cache;
+ }
+ ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
+ sizeof(struct ipa3_rt_entry), 0, 0, NULL);
+ if (!ipa3_ctx->rt_rule_cache) {
+ IPAERR(":ipa rt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_rule_cache;
+ }
+ ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
+ sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
+ if (!ipa3_ctx->hdr_cache) {
+ IPAERR(":ipa hdr cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_cache;
+ }
+ ipa3_ctx->hdr_offset_cache =
+ kmem_cache_create("IPA_HDR_OFFSET",
+ sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
+ if (!ipa3_ctx->hdr_offset_cache) {
+ IPAERR(":ipa hdr off cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_offset_cache;
+ }
+ ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
+ sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
+ if (!ipa3_ctx->hdr_proc_ctx_cache) {
+ IPAERR(":ipa hdr proc ctx cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_proc_ctx_cache;
+ }
+ ipa3_ctx->hdr_proc_ctx_offset_cache =
+ kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
+ sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
+ if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
+ IPAERR(":ipa hdr proc ctx off cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_proc_ctx_offset_cache;
+ }
+ ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
+ sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
+ if (!ipa3_ctx->rt_tbl_cache) {
+ IPAERR(":ipa rt tbl cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_tbl_cache;
+ }
+ ipa3_ctx->tx_pkt_wrapper_cache =
+ kmem_cache_create("IPA_TX_PKT_WRAPPER",
+ sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa3_ctx->tx_pkt_wrapper_cache) {
+ IPAERR(":ipa tx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_tx_pkt_wrapper_cache;
+ }
+ ipa3_ctx->rx_pkt_wrapper_cache =
+ kmem_cache_create("IPA_RX_PKT_WRAPPER",
+ sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa3_ctx->rx_pkt_wrapper_cache) {
+ IPAERR(":ipa rx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rx_pkt_wrapper_cache;
+ }
+
+ /* Setup DMA pool */
+ ipa3_ctx->dma_pool = dma_pool_create("ipa_tx", ipa3_ctx->pdev,
+ IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
+ 0, 0);
+ if (!ipa3_ctx->dma_pool) {
+ IPAERR("cannot alloc DMA pool.\n");
+ result = -ENOMEM;
+ goto fail_dma_pool;
+ }
+
+ /* init the various list heads */
+ INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
+ INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
+ }
+ INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
+ for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+ INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(
+			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i]);
+ }
+ INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+ INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (!ipa_is_ep_support_flt(i))
+ continue;
+
+ flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+ !ipa3_ctx->ip4_flt_tbl_hash_lcl;
+ flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+ !ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+ idr_init(&flt_tbl->rule_ids);
+
+ flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+ !ipa3_ctx->ip6_flt_tbl_hash_lcl;
+ flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+ !ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+ idr_init(&flt_tbl->rule_ids);
+ }
+
+ rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+ rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+ INIT_LIST_HEAD(&ipa3_ctx->intf_list);
+ INIT_LIST_HEAD(&ipa3_ctx->msg_list);
+ INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
+ init_waitqueue_head(&ipa3_ctx->msg_waitq);
+ mutex_init(&ipa3_ctx->msg_lock);
+
+ mutex_init(&ipa3_ctx->lock);
+ mutex_init(&ipa3_ctx->nat_mem.lock);
+
+ idr_init(&ipa3_ctx->ipa_idr);
+ spin_lock_init(&ipa3_ctx->idr_lock);
+
+ /* wlan related member */
+ memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
+ spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
+ spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+ INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+
+ /* setup the IPA pipe mem pool */
+ if (resource_p->ipa_pipe_mem_size)
+ ipa3_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+ resource_p->ipa_pipe_mem_size);
+
+ ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+
+ result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME);
+ if (result) {
+ IPAERR("alloc_chrdev_region err.\n");
+ result = -ENODEV;
+ goto fail_alloc_chrdev_region;
+ }
+
+ ipa3_ctx->dev = device_create(ipa3_ctx->class, NULL, ipa3_ctx->dev_num,
+ ipa3_ctx, DRV_NAME);
+ if (IS_ERR(ipa3_ctx->dev)) {
+ IPAERR(":device_create err.\n");
+ result = -ENODEV;
+ goto fail_device_create;
+ }
+
+ cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
+ ipa3_ctx->cdev.owner = THIS_MODULE;
+ ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
+
+ result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
+ if (result) {
+ IPAERR(":cdev_add err=%d\n", -result);
+ result = -ENODEV;
+ goto fail_cdev_add;
+ }
+	IPADBG("ipa cdev added successfully. major:%d minor:%d\n",
+ MAJOR(ipa3_ctx->dev_num),
+ MINOR(ipa3_ctx->dev_num));
+
+ if (ipa3_create_nat_device()) {
+ IPAERR("unable to create nat device\n");
+ result = -ENODEV;
+ goto fail_nat_dev_add;
+ }
+
+ /* Create a wakeup source. */
+ wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
+ spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
+
+ /* Initialize IPA RM (resource manager) */
+ result = ipa_rm_initialize();
+ if (result) {
+ IPAERR("RM initialization failed (%d)\n", -result);
+ result = -ENODEV;
+ goto fail_ipa_rm_init;
+ }
+	IPADBG("IPA resource manager initialized\n");
+
+ result = ipa3_create_apps_resource();
+ if (result) {
+ IPAERR("Failed to create APPS_CONS resource\n");
+ result = -ENODEV;
+ goto fail_create_apps_resource;
+ }
+
+ if (!ipa3_ctx->apply_rg10_wa) {
+ result = ipa3_init_interrupts();
+ if (result) {
+ IPAERR("ipa initialization of interrupts failed\n");
+ result = -ENODEV;
+ goto fail_ipa_init_interrupts;
+ }
+ } else {
+ IPADBG("Initialization of ipa interrupts skipped\n");
+ }
+
+ INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
+
+ init_completion(&ipa3_ctx->init_completion_obj);
+
+ /*
+ * For GSI, we can't register the GSI driver yet, as it expects
+ * the GSI FW to be up and running before the registration.
+ */
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ /*
+ * For IPA3.0, the GSI configuration is done by the GSI driver.
+ * For IPA3.1 (and on), the GSI configuration is done by TZ.
+ */
+ if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
+ result = ipa3_gsi_pre_fw_load_init();
+ if (result) {
+ IPAERR("gsi pre FW loading config failed\n");
+ result = -ENODEV;
+ goto fail_ipa_init_interrupts;
+ }
+ }
+	} else {
+		/* For BAM, the only other mode, just carry on with init */
+		return ipa3_post_init(resource_p, ipa_dev);
+	}
+
+ return 0;
+
+fail_ipa_init_interrupts:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+fail_create_apps_resource:
+ ipa_rm_exit();
+fail_ipa_rm_init:
+fail_nat_dev_add:
+ cdev_del(&ipa3_ctx->cdev);
+fail_cdev_add:
+ device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
+fail_device_create:
+ unregister_chrdev_region(ipa3_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+ if (ipa3_ctx->pipe_mem_pool)
+ gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
+ ipa3_destroy_flt_tbl_idrs();
+ idr_destroy(&ipa3_ctx->ipa_idr);
+fail_dma_pool:
+ kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+ kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
+fail_hdr_proc_ctx_offset_cache:
+ kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
+fail_hdr_proc_ctx_cache:
+ kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+ kmem_cache_destroy(ipa3_ctx->hdr_cache);
+fail_hdr_cache:
+ kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+ kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+ destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
+fail_create_transport_wq:
+ destroy_workqueue(ipa3_ctx->power_mgmt_wq);
+fail_init_hw:
+ ipahal_destroy();
+fail_ipahal:
+ iounmap(ipa3_ctx->mmio);
+fail_remap:
+ ipa3_disable_clks();
+ ipa3_active_clients_log_destroy();
+fail_init_active_client:
+fail_clk:
+ msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
+fail_bus_reg:
+fail_init_mem_partition:
+fail_bind:
+ kfree(ipa3_ctx->ctrl);
+fail_mem_ctrl:
+ ipc_log_context_destroy(ipa3_ctx->logbuf);
+fail_logbuf:
+ kfree(ipa3_ctx);
+ ipa3_ctx = NULL;
+fail_mem_ctx:
+ return result;
+}
+
+static int get_ipa_dts_configuration(struct platform_device *pdev,
+ struct ipa3_plat_drv_res *ipa_drv_res)
+{
+ int result;
+ struct resource *resource;
+
+ /* initialize ipa3_res */
+ ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+ ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+ ipa_drv_res->ipa_hw_type = 0;
+ ipa_drv_res->ipa3_hw_mode = 0;
+ ipa_drv_res->ipa_bam_remote_mode = false;
+ ipa_drv_res->modem_cfg_emb_pipe_flt = false;
+ ipa_drv_res->ipa_wdi2 = false;
+ ipa_drv_res->use_64_bit_dma_mask = false;
+ ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+ ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+ ipa_drv_res->apply_rg10_wa = false;
+ ipa_drv_res->gsi_ch20_wa = false;
+
+ smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
+ "qcom,smmu-disable-htw");
+
+ /* Get IPA HW Version */
+ result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
+ &ipa_drv_res->ipa_hw_type);
+ if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
+ IPAERR(":get resource failed for ipa-hw-ver!\n");
+ return -ENODEV;
+ }
+	IPADBG(": ipa_hw_type = %d\n", ipa_drv_res->ipa_hw_type);
+
+ if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
+ IPAERR(":IPA version below 3.0 not supported!\n");
+ return -ENODEV;
+ }
+
+ /* Get IPA HW mode */
+ result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
+ &ipa_drv_res->ipa3_hw_mode);
+ if (result)
+ IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
+ else
+		IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d\n",
+ ipa_drv_res->ipa3_hw_mode);
+
+ /* Get IPA WAN / LAN RX pool size */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,wan-rx-ring-size",
+ &ipa_drv_res->wan_rx_ring_size);
+ if (result)
+ IPADBG("using default for wan-rx-ring-size = %u\n",
+ ipa_drv_res->wan_rx_ring_size);
+ else
+		IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u\n",
+ ipa_drv_res->wan_rx_ring_size);
+
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,lan-rx-ring-size",
+ &ipa_drv_res->lan_rx_ring_size);
+ if (result)
+ IPADBG("using default for lan-rx-ring-size = %u\n",
+ ipa_drv_res->lan_rx_ring_size);
+ else
+		IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u\n",
+ ipa_drv_res->lan_rx_ring_size);
+
+ ipa_drv_res->use_ipa_teth_bridge =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,use-ipa-tethering-bridge");
+	IPADBG(": using tethering bridge driver = %s\n",
+ ipa_drv_res->use_ipa_teth_bridge
+ ? "True" : "False");
+
+ ipa_drv_res->ipa_bam_remote_mode =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-bam-remote-mode");
+ IPADBG(": ipa bam remote mode = %s\n",
+ ipa_drv_res->ipa_bam_remote_mode
+ ? "True" : "False");
+
+ ipa_drv_res->modem_cfg_emb_pipe_flt =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,modem-cfg-emb-pipe-flt");
+ IPADBG(": modem configure embedded pipe filtering = %s\n",
+ ipa_drv_res->modem_cfg_emb_pipe_flt
+ ? "True" : "False");
+
+ ipa_drv_res->ipa_wdi2 =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-wdi2");
+ IPADBG(": WDI-2.0 = %s\n",
+ ipa_drv_res->ipa_wdi2
+ ? "True" : "False");
+
+ ipa_drv_res->use_64_bit_dma_mask =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,use-64-bit-dma-mask");
+ IPADBG(": use_64_bit_dma_mask = %s\n",
+ ipa_drv_res->use_64_bit_dma_mask
+ ? "True" : "False");
+
+ ipa_drv_res->skip_uc_pipe_reset =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,skip-uc-pipe-reset");
+ IPADBG(": skip uC pipe reset = %s\n",
+ ipa_drv_res->skip_uc_pipe_reset
+ ? "True" : "False");
+
+ ipa_drv_res->tethered_flow_control =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,tethered-flow-control");
+ IPADBG(": Use apps based flow control = %s\n",
+ ipa_drv_res->tethered_flow_control
+ ? "True" : "False");
+
+ if (of_property_read_bool(pdev->dev.of_node,
+ "qcom,use-gsi"))
+ ipa_drv_res->transport_prototype = IPA_TRANSPORT_TYPE_GSI;
+ else
+ ipa_drv_res->transport_prototype = IPA_TRANSPORT_TYPE_SPS;
+
+ IPADBG(": transport type = %s\n",
+ ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS
+ ? "SPS" : "GSI");
+
+ /* Get IPA wrapper address */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ipa-base");
+ if (!resource) {
+ IPAERR(":get resource failed for ipa-base!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->ipa_mem_base = resource->start;
+ ipa_drv_res->ipa_mem_size = resource_size(resource);
+ IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
+ ipa_drv_res->ipa_mem_base,
+ ipa_drv_res->ipa_mem_size);
+
+ smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+ smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
+ if (ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
+ /* Get IPA BAM address */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "bam-base");
+ if (!resource) {
+ IPAERR(":get resource failed for bam-base!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->transport_mem_base = resource->start;
+ ipa_drv_res->transport_mem_size = resource_size(resource);
+ IPADBG(": bam-base = 0x%x, size = 0x%x\n",
+ ipa_drv_res->transport_mem_base,
+ ipa_drv_res->transport_mem_size);
+
+ /* Get IPA BAM IRQ number */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "bam-irq");
+ if (!resource) {
+ IPAERR(":get resource failed for bam-irq!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->transport_irq = resource->start;
+ IPADBG(": bam-irq = %d\n", ipa_drv_res->transport_irq);
+ } else {
+ /* Get IPA GSI address */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "gsi-base");
+ if (!resource) {
+ IPAERR(":get resource failed for gsi-base!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->transport_mem_base = resource->start;
+ ipa_drv_res->transport_mem_size = resource_size(resource);
+ IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
+ ipa_drv_res->transport_mem_base,
+ ipa_drv_res->transport_mem_size);
+
+ /* Get IPA GSI IRQ number */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "gsi-irq");
+ if (!resource) {
+ IPAERR(":get resource failed for gsi-irq!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->transport_irq = resource->start;
+ IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
+ }
+
+ /* Get IPA pipe mem start ofst */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ipa-pipe-mem");
+ if (!resource) {
+		IPADBG(":not using pipe memory - resource does not exist\n");
+ } else {
+ ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
+ ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
+ IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
+ ipa_drv_res->ipa_pipe_mem_start_ofst,
+ ipa_drv_res->ipa_pipe_mem_size);
+ }
+
+ /* Get IPA IRQ number */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "ipa-irq");
+ if (!resource) {
+ IPAERR(":get resource failed for ipa-irq!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->ipa_irq = resource->start;
+ IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
+
+ result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
+ &ipa_drv_res->ee);
+ if (result)
+ ipa_drv_res->ee = 0;
+
+ ipa_drv_res->apply_rg10_wa =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,use-rg10-limitation-mitigation");
+ IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
+ ipa_drv_res->apply_rg10_wa
+ ? "True" : "False");
+
+ ipa_drv_res->gsi_ch20_wa =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,do-not-use-ch-gsi-20");
+ IPADBG(": GSI CH 20 WA is = %s\n",
+			ipa_drv_res->gsi_ch20_wa
+ ? "Needed" : "Not needed");
+
+ return 0;
+}
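+
+/*
+ * Minimal DT node sketch matching the properties parsed above (all
+ * addresses and values are placeholders, not taken from a real target):
+ *
+ *	ipa_hw: qcom,ipa@7940000 {
+ *		compatible = "qcom,ipa";
+ *		reg = <0x7940000 0x30000>, <0x7904000 0x26000>;
+ *		reg-names = "ipa-base", "bam-base";
+ *		interrupts = <0 333 0>, <0 432 0>;
+ *		interrupt-names = "ipa-irq", "bam-irq";
+ *		qcom,ipa-hw-ver = <3>;
+ *		qcom,ipa-hw-mode = <0>;
+ *		qcom,ee = <0>;
+ *		qcom,wan-rx-ring-size = <192>;
+ *	};
+ */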
+
+static int ipa_smmu_wlan_cb_probe(struct device *dev)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
+ int disable_htw = 1;
+ int atomic_ctx = 1;
+ int fast = 1;
+ int bypass = 1;
+ int ret;
+ u32 add_map_size;
+ const u32 *add_map;
+ int i;
+
+ IPADBG("sub pdev=%p\n", dev);
+
+ cb->dev = dev;
+ cb->iommu = iommu_domain_alloc(msm_iommu_get_bus(dev));
+ if (!cb->iommu) {
+ IPAERR("could not alloc iommu domain\n");
+ /* assume this failure is because iommu driver is not ready */
+ return -EPROBE_DEFER;
+ }
+ cb->valid = true;
+
+ if (smmu_info.disable_htw) {
+ ret = iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+ &disable_htw);
+ if (ret) {
+ IPAERR("couldn't disable coherent HTW\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ }
+
+ if (smmu_info.s1_bypass) {
+ if (iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_S1_BYPASS,
+ &bypass)) {
+ IPAERR("couldn't set bypass\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU S1 BYPASS\n");
+ } else {
+ if (iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx)) {
+ IPAERR("couldn't disable coherent HTW\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU ATTR ATOMIC\n");
+
+ if (smmu_info.fast_map) {
+ if (iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
+ }
+
+ ret = iommu_attach_device(cb->iommu, dev);
+ if (ret) {
+ IPAERR("could not attach device ret=%d\n", ret);
+ cb->valid = false;
+ return ret;
+ }
+	/* map additional regions described in DT (e.g. ipa-uc RAM) */
+ add_map = of_get_property(dev->of_node,
+ "qcom,additional-mapping", &add_map_size);
+ if (add_map) {
+		/* the property is an array of <iova pa size> u32 triplets */
+ if (add_map_size % (3 * sizeof(u32))) {
+ IPAERR("wrong additional mapping format\n");
+ cb->valid = false;
+ return -EFAULT;
+ }
+
+		/* iterate over each entry of the additional mapping array */
+ for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+ u32 iova = be32_to_cpu(add_map[i]);
+ u32 pa = be32_to_cpu(add_map[i + 1]);
+ u32 size = be32_to_cpu(add_map[i + 2]);
+ unsigned long iova_p;
+ phys_addr_t pa_p;
+ u32 size_p;
+
+ IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+ iova_p, pa_p, size_p);
+ IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+ iova_p, &pa_p, size_p);
+ ipa3_iommu_map(cb->iommu,
+ iova_p, pa_p, size_p,
+ IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ }
+ }
+ return 0;
+}
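+
+/*
+ * Illustrative value (hypothetical addresses) for the
+ * qcom,additional-mapping property parsed above: each entry is an
+ * <iova pa size> u32 triplet, so a single 4KB identity mapping of uC RAM
+ * would read:
+ *
+ *	qcom,additional-mapping = <0x066bb000 0x066bb000 0x1000>;
+ */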
+
+static int ipa_smmu_uc_cb_probe(struct device *dev)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+ int disable_htw = 1;
+ int atomic_ctx = 1;
+ int bypass = 1;
+ int fast = 1;
+ int ret;
+ u32 iova_ap_mapping[2];
+
+ IPADBG("UC CB PROBE sub pdev=%p\n", dev);
+
+ ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+ iova_ap_mapping, 2);
+ if (ret) {
+ IPAERR("Fail to read UC start/size iova addresses\n");
+ return ret;
+ }
+ cb->va_start = iova_ap_mapping[0];
+ cb->va_size = iova_ap_mapping[1];
+ cb->va_end = cb->va_start + cb->va_size;
+ IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
+
+ if (smmu_info.use_64_bit_dma_mask) {
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
+ IPAERR("DMA set 64bit mask failed\n");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+ IPAERR("DMA set 32bit mask failed\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
+
+ cb->dev = dev;
+ cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+ cb->va_start, cb->va_size);
+ if (IS_ERR_OR_NULL(cb->mapping)) {
+ IPADBG("Fail to create mapping\n");
+ /* assume this failure is because iommu driver is not ready */
+ return -EPROBE_DEFER;
+ }
+ IPADBG("SMMU mapping created\n");
+ cb->valid = true;
+
+ IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
+ if (smmu_info.disable_htw) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+ &disable_htw)) {
+ IPAERR("couldn't disable coherent HTW\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ }
+
+ IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
+ if (smmu_info.s1_bypass) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &bypass)) {
+ IPAERR("couldn't set bypass\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU S1 BYPASS\n");
+ } else {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx)) {
+ IPAERR("couldn't set domain as atomic\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU atomic set\n");
+
+ if (smmu_info.fast_map) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
+ }
+
+ IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
+ ret = arm_iommu_attach_device(cb->dev, cb->mapping);
+ if (ret) {
+ IPAERR("could not attach device ret=%d\n", ret);
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return ret;
+ }
+
+ cb->next_addr = cb->va_end;
+ ipa3_ctx->uc_pdev = dev;
+
+ return 0;
+}
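+
+/*
+ * Hypothetical qcom,iova-mapping value for the context banks probed in this
+ * file: a <start size> pair, e.g. a 256MB uC VA window at 0x40000000:
+ *
+ *	qcom,iova-mapping = <0x40000000 0x10000000>;
+ */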
+
+static int ipa_smmu_ap_cb_probe(struct device *dev)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
+ int result;
+ int disable_htw = 1;
+ int atomic_ctx = 1;
+ int fast = 1;
+ int bypass = 1;
+ u32 iova_ap_mapping[2];
+ u32 add_map_size;
+ const u32 *add_map;
+ void *smem_addr;
+ int i;
+
+ IPADBG("AP CB probe: sub pdev=%p\n", dev);
+
+ result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+ iova_ap_mapping, 2);
+ if (result) {
+ IPAERR("Fail to read AP start/size iova addresses\n");
+ return result;
+ }
+ cb->va_start = iova_ap_mapping[0];
+ cb->va_size = iova_ap_mapping[1];
+ cb->va_end = cb->va_start + cb->va_size;
+ IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
+
+ if (smmu_info.use_64_bit_dma_mask) {
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
+ IPAERR("DMA set 64bit mask failed\n");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+ IPAERR("DMA set 32bit mask failed\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ cb->dev = dev;
+ cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+ cb->va_start, cb->va_size);
+ if (IS_ERR_OR_NULL(cb->mapping)) {
+ IPADBG("Fail to create mapping\n");
+ /* assume this failure is because iommu driver is not ready */
+ return -EPROBE_DEFER;
+ }
+ IPADBG("SMMU mapping created\n");
+ cb->valid = true;
+
+ if (smmu_info.disable_htw) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+ &disable_htw)) {
+ IPAERR("couldn't disable coherent HTW\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU disable HTW\n");
+ }
+ if (smmu_info.s1_bypass) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &bypass)) {
+ IPAERR("couldn't set bypass\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU S1 BYPASS\n");
+ } else {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx)) {
+ IPAERR("couldn't set domain as atomic\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU atomic set\n");
+
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
+
+ result = arm_iommu_attach_device(cb->dev, cb->mapping);
+ if (result) {
+ IPAERR("couldn't attach to IOMMU ret=%d\n", result);
+ cb->valid = false;
+ return result;
+ }
+
+ add_map = of_get_property(dev->of_node,
+ "qcom,additional-mapping", &add_map_size);
+ if (add_map) {
+		/* the property is an array of <iova pa size> u32 triplets */
+ if (add_map_size % (3 * sizeof(u32))) {
+ IPAERR("wrong additional mapping format\n");
+ cb->valid = false;
+ return -EFAULT;
+ }
+
+		/* iterate over each entry of the additional mapping array */
+ for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+ u32 iova = be32_to_cpu(add_map[i]);
+ u32 pa = be32_to_cpu(add_map[i + 1]);
+ u32 size = be32_to_cpu(add_map[i + 2]);
+ unsigned long iova_p;
+ phys_addr_t pa_p;
+ u32 size_p;
+
+ IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+ iova_p, pa_p, size_p);
+ IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+ iova_p, &pa_p, size_p);
+ ipa3_iommu_map(cb->mapping->domain,
+ iova_p, pa_p, size_p,
+ IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ }
+ }
+
+ /* map SMEM memory for IPA table accesses */
+ smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
+ SMEM_MODEM, 0);
+ if (smem_addr) {
+ phys_addr_t iova = smem_virt_to_phys(smem_addr);
+ phys_addr_t pa = iova;
+ unsigned long iova_p;
+ phys_addr_t pa_p;
+ u32 size_p;
+
+ IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE,
+ iova_p, pa_p, size_p);
+ IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+ iova_p, &pa_p, size_p);
+ ipa3_iommu_map(cb->mapping->domain,
+ iova_p, pa_p, size_p,
+ IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ }
+
+
+ smmu_info.present = true;
+
+ if (!ipa3_bus_scale_table)
+ ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
+
+ /* Proceed to real initialization */
+ result = ipa3_pre_init(&ipa3_res, dev);
+ if (result) {
+ IPAERR("ipa_init failed\n");
+ arm_iommu_detach_device(cb->dev);
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return result;
+ }
+
+ return result;
+}
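+
+/*
+ * The IPA_SMMU_ROUND_TO_PAGE() macro used by the probe functions above is
+ * expected to widen an (iova, pa, size) request to page granularity. A
+ * minimal sketch of the intended arithmetic (an assumption, 4KB pages):
+ *
+ *	iova_p = rounddown(iova, PAGE_SIZE);
+ *	pa_p = rounddown(pa, PAGE_SIZE);
+ *	size_p = roundup(size + pa - pa_p, PAGE_SIZE);
+ *
+ * e.g. a 0x100 byte request at pa 0x1000ff80 crosses a page boundary and
+ * becomes a 0x2000 byte mapping starting at 0x1000f000.
+ */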
+
+static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
+{
+ ipa3_freeze_clock_vote_and_notify_modem();
+
+ return IRQ_HANDLED;
+}
+
+static int ipa3_smp2p_probe(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+ int res;
+
+ IPADBG("node->name=%s\n", node->name);
+ if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
+ res = of_get_gpio(node, 0);
+ if (res < 0) {
+ IPADBG("of_get_gpio returned %d\n", res);
+ return res;
+ }
+
+ ipa3_ctx->smp2p_info.out_base_id = res;
+ IPADBG("smp2p out_base_id=%d\n",
+ ipa3_ctx->smp2p_info.out_base_id);
+ } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
+ int irq;
+
+ res = of_get_gpio(node, 0);
+ if (res < 0) {
+ IPADBG("of_get_gpio returned %d\n", res);
+ return res;
+ }
+
+ ipa3_ctx->smp2p_info.in_base_id = res;
+ IPADBG("smp2p in_base_id=%d\n",
+ ipa3_ctx->smp2p_info.in_base_id);
+
+ /* register for modem clk query */
+ irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
+ IPA_GPIO_IN_QUERY_CLK_IDX);
+ if (irq < 0) {
+ IPAERR("gpio_to_irq failed %d\n", irq);
+ return -ENODEV;
+ }
+ IPADBG("smp2p irq#=%d\n", irq);
+		res = request_irq(irq, ipa3_smp2p_modem_clk_query_isr,
+			IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
+ if (res) {
+ IPAERR("fail to register smp2p irq=%d\n", irq);
+ return -ENODEV;
+ }
+ res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
+ IPA_GPIO_IN_QUERY_CLK_IDX);
+ if (res)
+ IPAERR("failed to enable irq wake\n");
+ }
+
+ return 0;
+}
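+
+/*
+ * Illustrative smp2p gpio nodes (assumed layout) matched by the probe above;
+ * each node exposes one smp2p entry as a GPIO that the driver resolves with
+ * of_get_gpio():
+ *
+ *	qcom,smp2pgpio_map_ipa_1_out {
+ *		compatible = "qcom,smp2pgpio-map-ipa-1-out";
+ *		gpios = <&smp2pgpio_ipa_1_out 0 0>;
+ *	};
+ *
+ *	qcom,smp2pgpio_map_ipa_1_in {
+ *		compatible = "qcom,smp2pgpio-map-ipa-1-in";
+ *		gpios = <&smp2pgpio_ipa_1_in 0 0>;
+ *	};
+ */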
+
+int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+ struct ipa_api_controller *api_ctrl,
+ const struct of_device_id *pdrv_match)
+{
+ int result;
+ struct device *dev = &pdev_p->dev;
+
+ IPADBG("IPA driver probing started\n");
+ IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
+
+ if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
+ return ipa_smmu_ap_cb_probe(dev);
+
+ if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
+ return ipa_smmu_wlan_cb_probe(dev);
+
+ if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
+ return ipa_smmu_uc_cb_probe(dev);
+
+ if (of_device_is_compatible(dev->of_node,
+ "qcom,smp2pgpio-map-ipa-1-in"))
+ return ipa3_smp2p_probe(dev);
+
+ if (of_device_is_compatible(dev->of_node,
+ "qcom,smp2pgpio-map-ipa-1-out"))
+ return ipa3_smp2p_probe(dev);
+
+ master_dev = dev;
+ if (!ipa3_pdev)
+ ipa3_pdev = pdev_p;
+
+ result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
+ if (result) {
+ IPAERR("IPA dts parsing failed\n");
+ return result;
+ }
+
+ result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
+ if (result) {
+ IPAERR("IPA API binding failed\n");
+ return result;
+ }
+
+ result = of_platform_populate(pdev_p->dev.of_node,
+ pdrv_match, NULL, &pdev_p->dev);
+ if (result) {
+ IPAERR("failed to populate platform\n");
+ return result;
+ }
+
+ if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
+ if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,smmu-s1-bypass"))
+ smmu_info.s1_bypass = true;
+ if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,smmu-fast-map"))
+ smmu_info.fast_map = true;
+ if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,use-64-bit-dma-mask"))
+ smmu_info.use_64_bit_dma_mask = true;
+ smmu_info.arm_smmu = true;
+ pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
+ smmu_info.s1_bypass, smmu_info.fast_map);
+ } else if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,msm-smmu")) {
+ IPAERR("Legacy IOMMU not supported\n");
+ result = -EOPNOTSUPP;
+ } else {
+ if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,use-64-bit-dma-mask")) {
+ if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
+ dma_set_coherent_mask(&pdev_p->dev,
+ DMA_BIT_MASK(64))) {
+ IPAERR("DMA set 64bit mask failed\n");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(&pdev_p->dev,
+ DMA_BIT_MASK(32))) {
+ IPAERR("DMA set 32bit mask failed\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!ipa3_bus_scale_table)
+ ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
+ /* Proceed to real initialization */
+ result = ipa3_pre_init(&ipa3_res, dev);
+ if (result) {
+ IPAERR("ipa3_init failed\n");
+ return result;
+ }
+ }
+
+ return result;
+}
+
+/**
+ * ipa3_ap_suspend() - suspend callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP
+ * suspend operation is invoked, e.g. when the system is entering sleep.
+ *
+ * Returns -EAGAIN to the runtime_pm framework in case IPA is in use by AP.
+ * This will postpone the suspend operation until IPA is no longer used by AP.
+ */
+int ipa3_ap_suspend(struct device *dev)
+{
+ int i;
+
+ IPADBG("Enter...\n");
+
+ /* In case there is a tx/rx handler in polling mode fail to suspend */
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (ipa3_ctx->ep[i].sys &&
+ atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
+ IPAERR("EP %d is in polling state, do not suspend\n",
+ i);
+ return -EAGAIN;
+ }
+ }
+
+ /* release SPS IPA resource without waiting for inactivity timer */
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+ ipa3_sps_release_resource(NULL);
+ IPADBG("Exit\n");
+
+ return 0;
+}
+
+/**
+ * ipa3_ap_resume() - resume callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP resume
+ * operation is invoked.
+ *
+ * Always returns 0 since resume should always succeed.
+ */
+int ipa3_ap_resume(struct device *dev)
+{
+ return 0;
+}
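+
+/*
+ * A minimal sketch (an assumption; the actual wiring lives in the platform
+ * driver glue, not in this file) of how the suspend/resume callbacks above
+ * would typically be plugged into a dev_pm_ops table:
+ *
+ *	static const struct dev_pm_ops ipa_pm_ops = {
+ *		.suspend_noirq = ipa3_ap_suspend,
+ *		.resume_noirq = ipa3_ap_resume,
+ *	};
+ */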
+
+struct ipa3_context *ipa3_get_ctx(void)
+{
+ return ipa3_ctx;
+}
+
+static void ipa_gsi_request_resource(struct work_struct *work)
+{
+ unsigned long flags;
+ int ret;
+
+ /* request IPA clocks */
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ /* mark transport resource as granted */
+ spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+ ipa3_ctx->transport_pm.res_granted = true;
+
+ IPADBG("IPA is ON, calling gsi driver\n");
+ ret = gsi_complete_clk_grant(ipa3_ctx->gsi_dev_hdl);
+ if (ret != GSI_STATUS_SUCCESS)
+ IPAERR("gsi_complete_clk_grant failed %d\n", ret);
+
+ spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+}
+
+void ipa_gsi_req_res_cb(void *user_data, bool *granted)
+{
+ unsigned long flags;
+ struct ipa_active_client_logging_info log_info;
+
+ spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+
+ /* make sure no release will happen */
+ cancel_delayed_work(&ipa_gsi_release_resource_work);
+ ipa3_ctx->transport_pm.res_rel_in_prog = false;
+
+ if (ipa3_ctx->transport_pm.res_granted) {
+ *granted = true;
+ } else {
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "GSI_RESOURCE");
+ if (ipa3_inc_client_enable_clks_no_block(&log_info) == 0) {
+ ipa3_ctx->transport_pm.res_granted = true;
+ *granted = true;
+ } else {
+ queue_work(ipa3_ctx->transport_power_mgmt_wq,
+ &ipa_gsi_request_resource_work);
+ *granted = false;
+ }
+ }
+ spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+}
+
+static void ipa_gsi_release_resource(struct work_struct *work)
+{
+ unsigned long flags;
+ bool dec_clients = false;
+
+ spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+ /* check whether still need to decrease client usage */
+ if (ipa3_ctx->transport_pm.res_rel_in_prog) {
+ dec_clients = true;
+ ipa3_ctx->transport_pm.res_rel_in_prog = false;
+ ipa3_ctx->transport_pm.res_granted = false;
+ }
+ spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+ if (dec_clients)
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("GSI_RESOURCE");
+}
+
+int ipa_gsi_rel_res_cb(void *user_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+
+ ipa3_ctx->transport_pm.res_rel_in_prog = true;
+ queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+ &ipa_gsi_release_resource_work,
+ msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
+
+ spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+ return 0;
+}
+
+static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
+{
+ switch (notify->evt_id) {
+ case GSI_PER_EVT_GLOB_ERROR:
+ IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
+ IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
+ break;
+ case GSI_PER_EVT_GLOB_GP1:
+ IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
+ BUG();
+ break;
+ case GSI_PER_EVT_GLOB_GP2:
+ IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
+ BUG();
+ break;
+ case GSI_PER_EVT_GLOB_GP3:
+ IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
+ BUG();
+ break;
+ case GSI_PER_EVT_GENERAL_BREAK_POINT:
+ IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
+ break;
+ case GSI_PER_EVT_GENERAL_BUS_ERROR:
+ IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
+ BUG();
+ break;
+ case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
+ IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
+ BUG();
+ break;
+ case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
+ IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
+ BUG();
+ break;
+ default:
+ IPAERR("Received unexpected evt: %d\n",
+ notify->evt_id);
+ BUG();
+ }
+}
+
+int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
+{
+ struct ipa3_ready_cb_info *cb_info = NULL;
+
+	/* check whether the IPA driver context has been initialized */
+	if (!ipa3_ctx) {
+		IPADBG("IPA driver has not been initialized yet\n");
+ return -ENXIO;
+ }
+ mutex_lock(&ipa3_ctx->lock);
+ if (ipa3_ctx->ipa_initialization_complete) {
+ mutex_unlock(&ipa3_ctx->lock);
+ IPADBG("IPA driver finished initialization already\n");
+ return -EEXIST;
+ }
+
+ cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
+ if (!cb_info) {
+ mutex_unlock(&ipa3_ctx->lock);
+ return -ENOMEM;
+ }
+
+ cb_info->ready_cb = ipa_ready_cb;
+ cb_info->user_data = user_data;
+
+ list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return 0;
+}
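+
+/*
+ * Illustrative client usage of the registration API above (my_ready_cb and
+ * my_priv are hypothetical); -EEXIST means IPA already finished
+ * initialization, so the client may proceed immediately:
+ *
+ *	ret = ipa3_register_ipa_ready_cb(my_ready_cb, my_priv);
+ *	if (ret == -EEXIST)
+ *		my_ready_cb(my_priv);
+ *	else if (ret)
+ *		return ret;
+ */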
+
+int ipa3_iommu_map(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+ struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
+ struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();
+
+ IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
+ IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
+
+ /* make sure no overlapping */
+ if (domain == ipa3_get_smmu_domain()) {
+ if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+ IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+ ipa_assert();
+ return -EFAULT;
+ }
+ } else if (domain == ipa3_get_wlan_smmu_domain()) {
+ /* wlan is one time map */
+ } else if (domain == ipa3_get_uc_smmu_domain()) {
+ if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+ IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+ ipa_assert();
+ return -EFAULT;
+ }
+ } else {
+ IPAERR("Unexpected domain 0x%p\n", domain);
+ ipa_assert();
+ return -EFAULT;
+ }
+
+ return iommu_map(domain, iova, paddr, size, prot);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
new file mode 100644
index 0000000..f583a36
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -0,0 +1,1988 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <asm/barrier.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include "ipa_i.h"
+#include "linux/msm_gsi.h"
+
+/*
+ * These values were determined empirically and show good E2E bi-
+ * directional throughput
+ */
+#define IPA_HOLB_TMR_EN 0x1
+#define IPA_HOLB_TMR_DIS 0x0
+#define IPA_HOLB_TMR_DEFAULT_VAL 0x1ff
+#define IPA_POLL_AGGR_STATE_RETRIES_NUM 3
+#define IPA_POLL_AGGR_STATE_SLEEP_MSEC 1
+
+#define IPA_PKT_FLUSH_TO_US 100
+
+#define IPA_POLL_FOR_EMPTINESS_NUM 50
+#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
+#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
+#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
+
+/* xfer_rsc_idx should be 7 bits */
+#define IPA_XFER_RSC_IDX_MAX 127
+
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+ bool *is_empty);
+
+int ipa3_enable_data_path(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
+ struct ipa_ep_cfg_holb holb_cfg;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ int res = 0;
+ struct ipahal_reg_endp_init_rsrc_grp rsrc_grp;
+
+ IPADBG("Enabling data path\n");
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.en = IPA_HOLB_TMR_DIS;
+ holb_cfg.tmr_val = 0;
+ res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+ }
+
+ /* Enable the pipe */
+ if (IPA_CLIENT_IS_CONS(ep->client) &&
+ (ep->keep_ipa_awake ||
+ ipa3_ctx->resume_on_connect[ep->client] ||
+ !ipa3_should_pipe_be_suspended(ep->client))) {
+ memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ }
+
+ /* Assign the resource group for pipe */
+ memset(&rsrc_grp, 0, sizeof(rsrc_grp));
+ rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client);
+ if (rsrc_grp.rsrc_grp == -1) {
+ IPAERR("invalid group for client %d\n", ep->client);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPADBG("Setting group %d for pipe %d\n",
+ rsrc_grp.rsrc_grp, clnt_hdl);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl,
+ &rsrc_grp);
+
+ return res;
+}
+
+int ipa3_disable_data_path(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
+ struct ipa_ep_cfg_holb holb_cfg;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ struct ipa_ep_cfg_aggr ep_aggr;
+ int res = 0;
+
+ IPADBG("Disabling data path\n");
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.en = IPA_HOLB_TMR_EN;
+ holb_cfg.tmr_val = 0;
+ res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+ }
+
+ /* Suspend the pipe */
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ }
+
+ udelay(IPA_PKT_FLUSH_TO_US);
+ ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, &ep_aggr);
+ if (ep_aggr.aggr_en) {
+ res = ipa3_tag_aggr_force_close(clnt_hdl);
+ if (res) {
+ IPAERR("tag process timeout, client:%d err:%d\n",
+ clnt_hdl, res);
+ BUG();
+ }
+ }
+
+ return res;
+}
+
+static int ipa3_smmu_map_peer_bam(unsigned long dev)
+{
+ phys_addr_t base;
+ u32 size;
+ struct iommu_domain *smmu_domain;
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
+
+ if (!ipa3_ctx->smmu_s1_bypass) {
+ if (ipa3_ctx->peer_bam_map_cnt == 0) {
+ if (sps_get_bam_addr(dev, &base, &size)) {
+ IPAERR("Fail to get addr\n");
+ return -EINVAL;
+ }
+ smmu_domain = ipa3_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ if (ipa3_iommu_map(smmu_domain,
+ cb->va_end,
+ rounddown(base, PAGE_SIZE),
+ roundup(size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE |
+ IOMMU_DEVICE)) {
+ IPAERR("Fail to ipa3_iommu_map\n");
+ return -EINVAL;
+ }
+ }
+
+ ipa3_ctx->peer_bam_iova = cb->va_end;
+ ipa3_ctx->peer_bam_pa = base;
+ ipa3_ctx->peer_bam_map_size = size;
+ ipa3_ctx->peer_bam_dev = dev;
+
+ IPADBG("Peer bam %lu mapped\n", dev);
+ } else {
+ WARN_ON(dev != ipa3_ctx->peer_bam_dev);
+ }
+
+ ipa3_ctx->peer_bam_map_cnt++;
+ }
+
+ return 0;
+}
+
+static int ipa3_connect_configure_sps(const struct ipa_connect_params *in,
+ struct ipa3_ep_context *ep, int ipa_ep_idx)
+{
+ int result = -EFAULT;
+
+	/* Default Config */
+	ep->ep_hdl = sps_alloc_endpoint();
+	if (ep->ep_hdl == NULL) {
+		IPAERR("SPS EP allocation failed.\n");
+		return -EFAULT;
+	}
+
+	if (ipa3_smmu_map_peer_bam(in->client_bam_hdl)) {
+		IPAERR("fail to iommu map peer BAM.\n");
+		return -EFAULT;
+	}
+
+ result = sps_get_config(ep->ep_hdl,
+ &ep->connect);
+ if (result) {
+ IPAERR("fail to get config.\n");
+ return -EFAULT;
+ }
+
+ /* Specific Config */
+ if (IPA_CLIENT_IS_CONS(in->client)) {
+ ep->connect.mode = SPS_MODE_SRC;
+ ep->connect.destination =
+ in->client_bam_hdl;
+ ep->connect.dest_iova = ipa3_ctx->peer_bam_iova;
+ ep->connect.source = ipa3_ctx->bam_handle;
+ ep->connect.dest_pipe_index =
+ in->client_ep_idx;
+ ep->connect.src_pipe_index = ipa_ep_idx;
+ } else {
+ ep->connect.mode = SPS_MODE_DEST;
+ ep->connect.source = in->client_bam_hdl;
+ ep->connect.source_iova = ipa3_ctx->peer_bam_iova;
+ ep->connect.destination = ipa3_ctx->bam_handle;
+ ep->connect.src_pipe_index = in->client_ep_idx;
+ ep->connect.dest_pipe_index = ipa_ep_idx;
+ }
+
+ return 0;
+}
+
+static int ipa3_connect_allocate_fifo(const struct ipa_connect_params *in,
+ struct sps_mem_buffer *mem_buff_ptr,
+ bool *fifo_in_pipe_mem_ptr,
+ u32 *fifo_pipe_mem_ofst_ptr,
+ u32 fifo_size, int ipa_ep_idx)
+{
+ dma_addr_t dma_addr;
+ u32 ofst;
+ int result = -EFAULT;
+ struct iommu_domain *smmu_domain;
+
+ mem_buff_ptr->size = fifo_size;
+ if (in->pipe_mem_preferred) {
+ if (ipa3_pipe_mem_alloc(&ofst, fifo_size)) {
+ IPAERR("FIFO pipe mem alloc fail ep %u\n",
+ ipa_ep_idx);
+ mem_buff_ptr->base =
+ dma_alloc_coherent(ipa3_ctx->pdev,
+ mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ } else {
+ memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+ result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+ fifo_size, 1);
+ WARN_ON(result);
+ *fifo_in_pipe_mem_ptr = 1;
+ dma_addr = mem_buff_ptr->phys_base;
+ *fifo_pipe_mem_ofst_ptr = ofst;
+ }
+ } else {
+ mem_buff_ptr->base =
+ dma_alloc_coherent(ipa3_ctx->pdev, mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ }
+ if (ipa3_ctx->smmu_s1_bypass) {
+ mem_buff_ptr->phys_base = dma_addr;
+ } else {
+ mem_buff_ptr->iova = dma_addr;
+ smmu_domain = ipa_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ mem_buff_ptr->phys_base =
+ iommu_iova_to_phys(smmu_domain, dma_addr);
+ }
+ }
+ if (mem_buff_ptr->base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa3_connect() - low-level IPA client connect
+ * @in: [in] input parameters from client
+ * @sps: [out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl: [out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode; these peripherals are USB and HSIC. This API expects
+ * the caller to take responsibility for adding any needed headers, routing
+ * and filtering tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_connect(const struct ipa_connect_params *in,
+ struct ipa_sps_params *sps,
+ u32 *clnt_hdl)
+{
+ int ipa_ep_idx;
+ int result = -EFAULT;
+ struct ipa3_ep_context *ep;
+ struct ipahal_reg_ep_cfg_status ep_status;
+ unsigned long base;
+ struct iommu_domain *smmu_domain;
+
+ IPADBG("connecting client\n");
+
+ if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+ in->client >= IPA_CLIENT_MAX ||
+ in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa3_get_ep_mapping(in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to alloc EP.\n");
+ goto fail;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (ep->valid) {
+ IPAERR("EP already allocated.\n");
+ goto fail;
+ }
+
+ memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+ IPA_ACTIVE_CLIENTS_INC_EP(in->client);
+
+ ep->skip_ep_cfg = in->skip_ep_cfg;
+ ep->valid = 1;
+ ep->client = in->client;
+ ep->client_notify = in->notify;
+ ep->priv = in->priv;
+ ep->keep_ipa_awake = in->keep_ipa_awake;
+
+ result = ipa3_enable_data_path(ipa_ep_idx);
+ if (result) {
+ IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx);
+ goto ipa_cfg_ep_fail;
+ }
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa3_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+ /* Setting EP status 0 */
+ memset(&ep_status, 0, sizeof(ep_status));
+ if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) {
+ IPAERR("fail to configure status of EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+ IPADBG("ep configuration successful\n");
+ } else {
+ IPADBG("Skipping endpoint configuration.\n");
+ }
+
+ result = ipa3_connect_configure_sps(in, ep, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to configure SPS.\n");
+ goto ipa_cfg_ep_fail;
+ }
+
+ if (!ipa3_ctx->smmu_s1_bypass &&
+ (in->desc.base == NULL ||
+ in->data.base == NULL)) {
+ IPAERR(" allocate FIFOs data_fifo=0x%p desc_fifo=0x%p.\n",
+ in->data.base, in->desc.base);
+ goto desc_mem_alloc_fail;
+ }
+
+ if (in->desc.base == NULL) {
+ result = ipa3_connect_allocate_fifo(in, &ep->connect.desc,
+ &ep->desc_fifo_in_pipe_mem,
+ &ep->desc_fifo_pipe_mem_ofst,
+ in->desc_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DESC FIFO.\n");
+ goto desc_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DESC FIFO\n");
+ ep->connect.desc = in->desc;
+ ep->desc_fifo_client_allocated = 1;
+ }
+ IPADBG("Descriptor FIFO pa=%pa, size=%d\n", &ep->connect.desc.phys_base,
+ ep->connect.desc.size);
+
+ if (in->data.base == NULL) {
+ result = ipa3_connect_allocate_fifo(in, &ep->connect.data,
+ &ep->data_fifo_in_pipe_mem,
+ &ep->data_fifo_pipe_mem_ofst,
+ in->data_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DATA FIFO.\n");
+ goto data_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DATA FIFO\n");
+ ep->connect.data = in->data;
+ ep->data_fifo_client_allocated = 1;
+ }
+ IPADBG("Data FIFO pa=%pa, size=%d\n", &ep->connect.data.phys_base,
+ ep->connect.data.size);
+
+ if (!ipa3_ctx->smmu_s1_bypass) {
+ ep->connect.data.iova = ep->connect.data.phys_base;
+ base = ep->connect.data.iova;
+ smmu_domain = ipa_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ if (ipa3_iommu_map(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.data.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE)) {
+ IPAERR("Fail to ipa3_iommu_map data FIFO\n");
+ goto iommu_map_data_fail;
+ }
+ }
+ ep->connect.desc.iova = ep->connect.desc.phys_base;
+ base = ep->connect.desc.iova;
+ if (smmu_domain != NULL) {
+ if (ipa3_iommu_map(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.desc.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE)) {
+ IPAERR("Fail to ipa3_iommu_map desc FIFO\n");
+ goto iommu_map_desc_fail;
+ }
+ }
+ }
+
+ if (IPA_CLIENT_IS_USB_CONS(in->client))
+ ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD;
+ else
+ ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+ ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */
+
+ result = ipa3_sps_connect_safe(ep->ep_hdl, &ep->connect, in->client);
+ if (result) {
+ IPAERR("sps_connect fails.\n");
+ goto sps_connect_fail;
+ }
+
+ sps->ipa_bam_hdl = ipa3_ctx->bam_handle;
+ sps->ipa_ep_idx = ipa_ep_idx;
+ *clnt_hdl = ipa_ep_idx;
+ memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+ memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+ ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->client))
+ ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+
+ IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
+
+ return 0;
+
+sps_connect_fail:
+ if (!ipa3_ctx->smmu_s1_bypass) {
+ base = ep->connect.desc.iova;
+ smmu_domain = ipa_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ iommu_unmap(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.desc.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE));
+ }
+ }
+iommu_map_desc_fail:
+ if (!ipa3_ctx->smmu_s1_bypass) {
+ base = ep->connect.data.iova;
+ smmu_domain = ipa_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ iommu_unmap(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.data.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE));
+ }
+ }
+iommu_map_data_fail:
+ if (!ep->data_fifo_client_allocated) {
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(ipa3_ctx->pdev,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa3_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+ }
+data_mem_alloc_fail:
+ if (!ep->desc_fifo_client_allocated) {
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(ipa3_ctx->pdev,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa3_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+ }
+desc_mem_alloc_fail:
+ sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+ memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+fail:
+ return result;
+}
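+
+/*
+ * Hedged usage sketch for a BAM-BAM peripheral driver (all values are
+ * illustrative; a real caller fills them from its own BAM configuration):
+ *
+ *	struct ipa_connect_params in = { };
+ *	struct ipa_sps_params sps;
+ *	u32 hdl;
+ *
+ *	in.client = IPA_CLIENT_USB_PROD;
+ *	in.client_bam_hdl = usb_bam_hdl;
+ *	in.client_ep_idx = usb_pipe_idx;
+ *	in.desc_fifo_sz = 0x800;
+ *	in.data_fifo_sz = 0x2000;
+ *	if (ipa3_connect(&in, &sps, &hdl))
+ *		return -EFAULT;
+ *
+ * The caller then uses sps.* for the peer-side sps_connect() and tears
+ * down with ipa3_disconnect(hdl) when done.
+ */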
+
+static int ipa3_smmu_unmap_peer_bam(unsigned long dev)
+{
+ size_t len;
+ struct iommu_domain *smmu_domain;
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
+
+ if (!ipa3_ctx->smmu_s1_bypass) {
+ WARN_ON(dev != ipa3_ctx->peer_bam_dev);
+ ipa3_ctx->peer_bam_map_cnt--;
+ if (ipa3_ctx->peer_bam_map_cnt == 0) {
+ len = roundup(ipa3_ctx->peer_bam_map_size +
+ ipa3_ctx->peer_bam_pa -
+ rounddown(ipa3_ctx->peer_bam_pa,
+ PAGE_SIZE), PAGE_SIZE);
+ smmu_domain = ipa3_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ if (iommu_unmap(smmu_domain,
+ cb->va_end, len) != len) {
+ IPAERR("Fail to iommu_unmap\n");
+ return -EINVAL;
+ }
+ IPADBG("Peer bam %lu unmapped\n", dev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ipa3_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take
+ * responsibility for freeing any needed headers, routing and filtering
+ * tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_disconnect(u32 clnt_hdl)
+{
+ int result;
+ struct ipa3_ep_context *ep;
+ unsigned long peer_bam;
+ unsigned long base;
+ struct iommu_domain *smmu_domain;
+ struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
+ int res;
+ enum ipa_client_type client_type;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+ client_type = ipa3_get_client_mapping(clnt_hdl);
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+ /* Set Disconnect in Progress flag. */
+ spin_lock(&ipa3_ctx->disconnect_lock);
+ ep->disconnect_in_progress = true;
+ spin_unlock(&ipa3_ctx->disconnect_lock);
+
+ result = ipa3_disable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ clnt_hdl);
+ return -EPERM;
+ }
+
+ result = sps_disconnect(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS disconnect failed.\n");
+ return -EPERM;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ peer_bam = ep->connect.destination;
+ else
+ peer_bam = ep->connect.source;
+
+ if (ipa3_smmu_unmap_peer_bam(peer_bam)) {
+ IPAERR("fail to iommu unmap peer BAM.\n");
+ return -EPERM;
+ }
+
+ if (!ep->desc_fifo_client_allocated &&
+ ep->connect.desc.base) {
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(ipa3_ctx->pdev,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa3_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+ }
+
+ if (!ep->data_fifo_client_allocated &&
+ ep->connect.data.base) {
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(ipa3_ctx->pdev,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa3_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+ }
+
+ if (!ipa3_ctx->smmu_s1_bypass) {
+ base = ep->connect.desc.iova;
+ smmu_domain = ipa_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ iommu_unmap(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.desc.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE));
+ }
+ }
+
+ if (!ipa3_ctx->smmu_s1_bypass) {
+ base = ep->connect.data.iova;
+ smmu_domain = ipa_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ iommu_unmap(smmu_domain,
+ rounddown(base, PAGE_SIZE),
+ roundup(ep->connect.data.size + base -
+ rounddown(base, PAGE_SIZE), PAGE_SIZE));
+ }
+ }
+
+ result = sps_free_endpoint(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS de-alloc EP failed.\n");
+ return -EPERM;
+ }
+
+ ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+	/*
+	 * If APPS flow control is not enabled and a force-clear request was
+	 * previously sent for this pipe, revoke it now.
+	 */
+	if (!ipa3_ctx->tethered_flow_control && ep->qmi_request_sent) {
+		/* Send a message to modem to disable force clear. */
+ req.request_id = clnt_hdl;
+ res = ipa3_qmi_disable_force_clear_datapath_send(&req);
+ if (res) {
+ IPADBG("disable_force_clear_datapath failed %d\n",
+ res);
+ }
+ }
+
+ spin_lock(&ipa3_ctx->disconnect_lock);
+ memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+ spin_unlock(&ipa3_ctx->disconnect_lock);
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+ return 0;
+}
+
+/**
+ * ipa3_reset_endpoint() - reset an endpoint from BAM perspective
+ * @clnt_hdl: [in] IPA client handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_reset_endpoint(u32 clnt_hdl)
+{
+ int res;
+ struct ipa3_ep_context *ep;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
+ IPAERR("Bad parameters.\n");
+ return -EFAULT;
+ }
+ ep = &ipa3_ctx->ep[clnt_hdl];
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+ res = sps_disconnect(ep->ep_hdl);
+ if (res) {
+ IPAERR("sps_disconnect() failed, res=%d.\n", res);
+ goto bail;
+ } else {
+ res = ipa3_sps_connect_safe(ep->ep_hdl, &ep->connect,
+ ep->client);
+ if (res) {
+ IPAERR("sps_connect() failed, res=%d.\n", res);
+ goto bail;
+ }
+ }
+
+bail:
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ return res;
+}
+
+/**
+ * ipa3_sps_connect_safe() - connect endpoint from BAM perspective
+ * @h: [in] sps pipe handle
+ * @connect: [in] sps connect parameters
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * This function connects a BAM pipe using SPS driver sps_connect() API
+ * and by requesting uC interface to reset the pipe, avoids an IPA HW
+ * limitation that does not allow resetting a BAM pipe during traffic in
+ * IPA TX command queue.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+ enum ipa_client_type ipa_client)
+{
+ int res;
+
+ if (ipa3_ctx->ipa_hw_type > IPA_HW_v2_5 ||
+ ipa3_ctx->skip_uc_pipe_reset) {
+ IPADBG("uC pipe reset is not required\n");
+ } else {
+ res = ipa3_uc_reset_pipe(ipa_client);
+ if (res)
+ return res;
+ }
+ return sps_connect(h, connect);
+}
+
+static void ipa_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+ if (notify) {
+ switch (notify->evt_id) {
+ case GSI_CHAN_INVALID_TRE_ERR:
+ IPAERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
+ break;
+ case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+ IPAERR("Received GSI_CHAN_NON_ALLOC_EVT_ACCESS_ERR\n");
+ break;
+ case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+ IPAERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+ break;
+ case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+ IPAERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+ break;
+ case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+ IPAERR("Received GSI_CHAN_UNSUPP_INTER_EE_OP_ERR\n");
+ break;
+ case GSI_CHAN_HWO_1_ERR:
+ IPAERR("Received GSI_CHAN_HWO_1_ERR\n");
+ break;
+ default:
+ IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+ }
+ BUG();
+ }
+}
+
+static void ipa_xfer_cb(struct gsi_chan_xfer_notify *notify)
+{
+}
+
+static int ipa3_reconfigure_channel_to_gpi(struct ipa3_ep_context *ep,
+ struct gsi_chan_props *orig_chan_props,
+ struct ipa_mem_buffer *chan_dma)
+{
+ struct gsi_chan_props chan_props;
+ enum gsi_status gsi_res;
+ dma_addr_t chan_dma_addr;
+ int result;
+
+ /* Set up channel properties */
+ memset(&chan_props, 0, sizeof(struct gsi_chan_props));
+ chan_props.prot = GSI_CHAN_PROT_GPI;
+ chan_props.dir = GSI_CHAN_DIR_FROM_GSI;
+ chan_props.ch_id = orig_chan_props->ch_id;
+ chan_props.evt_ring_hdl = orig_chan_props->evt_ring_hdl;
+ chan_props.re_size = GSI_CHAN_RE_SIZE_16B;
+ chan_props.ring_len = 2 * GSI_CHAN_RE_SIZE_16B;
+	chan_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, chan_props.ring_len,
+		&chan_dma_addr, GFP_KERNEL);
+	if (!chan_props.ring_base_vaddr) {
+		IPAERR("failed to alloc dummy channel ring\n");
+		return -ENOMEM;
+	}
+	chan_props.ring_base_addr = chan_dma_addr;
+ chan_dma->base = chan_props.ring_base_vaddr;
+ chan_dma->phys_base = chan_props.ring_base_addr;
+ chan_dma->size = chan_props.ring_len;
+ chan_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+ chan_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+ chan_props.low_weight = 1;
+ chan_props.chan_user_data = NULL;
+ chan_props.err_cb = ipa_chan_err_cb;
+ chan_props.xfer_cb = ipa_xfer_cb;
+
+ gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, &chan_props, NULL);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error setting channel properties\n");
+ result = -EFAULT;
+ goto set_chan_cfg_fail;
+ }
+
+ return 0;
+
+set_chan_cfg_fail:
+ dma_free_coherent(ipa3_ctx->pdev, chan_dma->size,
+ chan_dma->base, chan_dma->phys_base);
+ return result;
+
+}
+
+static int ipa3_restore_channel_properties(struct ipa3_ep_context *ep,
+ struct gsi_chan_props *chan_props,
+ union gsi_channel_scratch *chan_scratch)
+{
+ enum gsi_status gsi_res;
+
+ gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, chan_props,
+ chan_scratch);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error restoring channel properties\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
+ struct ipa3_ep_context *ep)
+{
+ int result = -EFAULT;
+ enum gsi_status gsi_res;
+ struct gsi_chan_props orig_chan_props;
+ union gsi_channel_scratch orig_chan_scratch;
+ struct ipa_mem_buffer chan_dma;
+ void *buff;
+ dma_addr_t dma_addr;
+ struct gsi_xfer_elem xfer_elem;
+ int i;
+ int aggr_active_bitmap = 0;
+
+ IPADBG("Applying reset channel with open aggregation frame WA\n");
+ ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
+
+ /* Reset channel */
+ gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error resetting channel: %d\n", gsi_res);
+ return -EFAULT;
+ }
+
+ /* Reconfigure channel to dummy GPI channel */
+ memset(&orig_chan_props, 0, sizeof(struct gsi_chan_props));
+ memset(&orig_chan_scratch, 0, sizeof(union gsi_channel_scratch));
+ gsi_res = gsi_get_channel_cfg(ep->gsi_chan_hdl, &orig_chan_props,
+ &orig_chan_scratch);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error getting channel properties: %d\n", gsi_res);
+ return -EFAULT;
+ }
+ memset(&chan_dma, 0, sizeof(struct ipa_mem_buffer));
+ result = ipa3_reconfigure_channel_to_gpi(ep, &orig_chan_props,
+ &chan_dma);
+ if (result)
+ return -EFAULT;
+
+ /* Start channel and put 1 Byte descriptor on it */
+ gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error starting channel: %d\n", gsi_res);
+ goto start_chan_fail;
+ }
+
+ memset(&xfer_elem, 0, sizeof(struct gsi_xfer_elem));
+ buff = dma_alloc_coherent(ipa3_ctx->pdev, 1, &dma_addr,
+ GFP_KERNEL);
+ xfer_elem.addr = dma_addr;
+ xfer_elem.len = 1;
+ xfer_elem.flags = GSI_XFER_FLAG_EOT;
+ xfer_elem.type = GSI_XFER_ELEM_DATA;
+
+ gsi_res = gsi_queue_xfer(ep->gsi_chan_hdl, 1, &xfer_elem,
+ true);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error queueing xfer: %d\n", gsi_res);
+ result = -EFAULT;
+ goto queue_xfer_fail;
+ }
+
+ /* Wait for aggregation frame to be closed and stop channel*/
+ for (i = 0; i < IPA_POLL_AGGR_STATE_RETRIES_NUM; i++) {
+ aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+ if (!(aggr_active_bitmap & (1 << clnt_hdl)))
+ break;
+ msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+ }
+
+ if (aggr_active_bitmap & (1 << clnt_hdl)) {
+ IPAERR("Failed closing aggr frame for client: %d\n",
+ clnt_hdl);
+ BUG();
+ }
+
+ dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr);
+
+ result = ipa3_stop_gsi_channel(clnt_hdl);
+ if (result) {
+ IPAERR("Error stopping channel: %d\n", result);
+ goto start_chan_fail;
+ }
+
+ /* Reset channel */
+ gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error resetting channel: %d\n", gsi_res);
+ result = -EFAULT;
+ goto start_chan_fail;
+ }
+
+	/*
+	 * Need to sleep for 1ms as required by the HW-verified
+	 * sequence for resetting a GSI channel
+	 */
+ msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+
+ /* Restore channels properties */
+ result = ipa3_restore_channel_properties(ep, &orig_chan_props,
+ &orig_chan_scratch);
+ if (result)
+ goto restore_props_fail;
+ dma_free_coherent(ipa3_ctx->pdev, chan_dma.size,
+ chan_dma.base, chan_dma.phys_base);
+
+ return 0;
+
+queue_xfer_fail:
+ ipa3_stop_gsi_channel(clnt_hdl);
+ dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr);
+start_chan_fail:
+ ipa3_restore_channel_properties(ep, &orig_chan_props,
+ &orig_chan_scratch);
+restore_props_fail:
+ dma_free_coherent(ipa3_ctx->pdev, chan_dma.size,
+ chan_dma.base, chan_dma.phys_base);
+ return result;
+}
+
+int ipa3_reset_gsi_channel(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+ int result = -EFAULT;
+ enum gsi_status gsi_res;
+ int aggr_active_bitmap = 0;
+
+ IPADBG("entry\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+ /*
+ * Check for open aggregation frame on Consumer EP -
+ * reset with open aggregation frame WA
+ */
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+ if (aggr_active_bitmap & (1 << clnt_hdl)) {
+ result = ipa3_reset_with_open_aggr_frame_wa(clnt_hdl,
+ ep);
+ if (result)
+ goto reset_chan_fail;
+ goto finish_reset;
+ }
+ }
+
+	/*
+	 * Reset the channel.
+	 * If reset is called right after stop, we need to wait 1ms.
+	 */
+ msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+ gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error resetting channel: %d\n", gsi_res);
+ result = -EFAULT;
+ goto reset_chan_fail;
+ }
+
+finish_reset:
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("exit\n");
+ return 0;
+
+reset_chan_fail:
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ return result;
+}
+
+int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+ int result = -EFAULT;
+ enum gsi_status gsi_res;
+
+ IPADBG("entry\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+ /* Reset event ring */
+ gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error resetting event: %d\n", gsi_res);
+ result = -EFAULT;
+ goto reset_evt_fail;
+ }
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("exit\n");
+ return 0;
+
+reset_evt_fail:
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ return result;
+}
+
+static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params)
+{
+	return params->client < IPA_CLIENT_MAX;
+}
+
+int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map)
+{
+ struct iommu_domain *smmu_domain;
+ int res;
+
+ if (ipa3_ctx->smmu_s1_bypass)
+ return 0;
+
+ smmu_domain = ipa3_get_smmu_domain();
+ if (!smmu_domain) {
+ IPAERR("invalid smmu domain\n");
+ return -EINVAL;
+ }
+
+ if (map) {
+ res = ipa3_iommu_map(smmu_domain, phys_addr, phys_addr,
+ PAGE_SIZE, IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ } else {
+ res = iommu_unmap(smmu_domain, phys_addr, PAGE_SIZE);
+ res = (res != PAGE_SIZE);
+ }
+ if (res) {
+ IPAERR("Fail to %s reg 0x%pa\n", map ? "map" : "unmap",
+ &phys_addr);
+ return -EINVAL;
+ }
+
+ IPADBG("Peer reg 0x%pa %s\n", &phys_addr, map ? "map" : "unmap");
+
+ return 0;
+}
+
+int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr, u32 size, bool map)
+{
+ struct iommu_domain *smmu_domain;
+ int res;
+
+ if (ipa3_ctx->smmu_s1_bypass)
+ return 0;
+
+ smmu_domain = ipa3_get_smmu_domain();
+ if (!smmu_domain) {
+ IPAERR("invalid smmu domain\n");
+ return -EINVAL;
+ }
+
+ if (map) {
+ res = ipa3_iommu_map(smmu_domain,
+ rounddown(iova, PAGE_SIZE),
+ rounddown(phys_addr, PAGE_SIZE),
+ roundup(size + iova - rounddown(iova, PAGE_SIZE),
+ PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE);
+ if (res) {
+ IPAERR("Fail to map 0x%llx->0x%pa\n", iova, &phys_addr);
+ return -EINVAL;
+ }
+ } else {
+ res = iommu_unmap(smmu_domain,
+ rounddown(iova, PAGE_SIZE),
+ roundup(size + iova - rounddown(iova, PAGE_SIZE),
+ PAGE_SIZE));
+ if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE),
+ PAGE_SIZE)) {
+ IPAERR("Fail to unmap 0x%llx->0x%pa\n",
+ iova, &phys_addr);
+ return -EINVAL;
+ }
+ }
+
+ IPADBG("Peer buff %s 0x%llx->0x%pa\n", map ? "map" : "unmap",
+ iova, &phys_addr);
+
+ return 0;
+}
+
+
+int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
+ struct ipa_req_chan_out_params *out_params)
+{
+ int ipa_ep_idx;
+ int result = -EFAULT;
+ struct ipa3_ep_context *ep;
+ struct ipahal_reg_ep_cfg_status ep_status;
+ unsigned long gsi_dev_hdl;
+ enum gsi_status gsi_res;
+	struct ipa_gsi_ep_config *gsi_ep_cfg_ptr;
+
+ IPADBG("entry\n");
+ if (params == NULL || out_params == NULL ||
+ !ipa3_is_legal_params(params)) {
+ IPAERR("bad parameters\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa3_get_ep_mapping(params->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to alloc EP.\n");
+ goto fail;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (ep->valid) {
+ IPAERR("EP already allocated.\n");
+ goto fail;
+ }
+
+ memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ ep->skip_ep_cfg = params->skip_ep_cfg;
+ ep->valid = 1;
+ ep->client = params->client;
+ ep->client_notify = params->notify;
+ ep->priv = params->priv;
+ ep->keep_ipa_awake = params->keep_ipa_awake;
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa3_cfg_ep(ipa_ep_idx, ¶ms->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+ /* Setting EP status 0 */
+ memset(&ep_status, 0, sizeof(ep_status));
+ if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) {
+ IPAERR("fail to configure status of EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+ IPADBG("ep configuration successful\n");
+ } else {
+ IPADBG("Skipping endpoint configuration.\n");
+ }
+
+ out_params->clnt_hdl = ipa_ep_idx;
+
+ result = ipa3_enable_data_path(out_params->clnt_hdl);
+ if (result) {
+ IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+ out_params->clnt_hdl);
+ goto ipa_cfg_ep_fail;
+ }
+
+ gsi_dev_hdl = ipa3_ctx->gsi_dev_hdl;
+ gsi_res = gsi_alloc_evt_ring(¶ms->evt_ring_params, gsi_dev_hdl,
+ &ep->gsi_evt_ring_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error allocating event ring: %d\n", gsi_res);
+ result = -EFAULT;
+ goto ipa_cfg_ep_fail;
+ }
+
+ gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
+ params->evt_scratch);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error writing event ring scratch: %d\n", gsi_res);
+ result = -EFAULT;
+ goto write_evt_scratch_fail;
+ }
+
+	gsi_ep_cfg_ptr = ipa_get_gsi_ep_info(ipa_ep_idx);
+	if (!gsi_ep_cfg_ptr) {
+		IPAERR("Error ipa_get_gsi_ep_info ret NULL\n");
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+ params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+ params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num;
+ gsi_res = gsi_alloc_channel(¶ms->chan_params, gsi_dev_hdl,
+ &ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error allocating channel: %d, chan_id: %d\n", gsi_res,
+ params->chan_params.ch_id);
+ result = -EFAULT;
+ goto write_evt_scratch_fail;
+ }
+
+ memcpy(&ep->chan_scratch, ¶ms->chan_scratch,
+ sizeof(union __packed gsi_channel_scratch));
+ ep->chan_scratch.xdci.max_outstanding_tre =
+ params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv;
+ gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+ params->chan_scratch);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error writing channel scratch: %d\n", gsi_res);
+ result = -EFAULT;
+ goto write_chan_scratch_fail;
+ }
+
+ gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl,
+ &out_params->db_reg_phs_addr_lsb,
+ &out_params->db_reg_phs_addr_msb);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error querying channel DB registers addresses: %d\n",
+ gsi_res);
+ result = -EFAULT;
+ goto write_chan_scratch_fail;
+ }
+
+ ep->gsi_mem_info.evt_ring_len = params->evt_ring_params.ring_len;
+ ep->gsi_mem_info.evt_ring_base_addr =
+ params->evt_ring_params.ring_base_addr;
+ ep->gsi_mem_info.evt_ring_base_vaddr =
+ params->evt_ring_params.ring_base_vaddr;
+ ep->gsi_mem_info.chan_ring_len = params->chan_params.ring_len;
+ ep->gsi_mem_info.chan_ring_base_addr =
+ params->chan_params.ring_base_addr;
+ ep->gsi_mem_info.chan_ring_base_vaddr =
+ params->chan_params.ring_base_vaddr;
+
+ ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client))
+ ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx);
+ IPADBG("exit\n");
+
+ return 0;
+
+write_chan_scratch_fail:
+ gsi_dealloc_channel(ep->gsi_chan_hdl);
+write_evt_scratch_fail:
+ gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+ipa_cfg_ep_fail:
+ memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+fail:
+ return result;
+}
+
+int ipa3_set_usb_max_packet_size(
+ enum ipa_usb_max_usb_packet_size usb_max_packet_size)
+{
+ struct gsi_device_scratch dev_scratch;
+ enum gsi_status gsi_res;
+
+ IPADBG("entry\n");
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch));
+ dev_scratch.mhi_base_chan_idx_valid = false;
+ dev_scratch.max_usb_pkt_size_valid = true;
+ dev_scratch.max_usb_pkt_size = usb_max_packet_size;
+
+ gsi_res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
+ &dev_scratch);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error writing device scratch: %d\n", gsi_res);
+ return -EFAULT;
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ IPADBG("exit\n");
+ return 0;
+}
+
+int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
+{
+ struct ipa3_ep_context *ep;
+ int result = -EFAULT;
+ enum gsi_status gsi_res;
+
+ IPADBG("entry\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+ xferrscidx < 0 || xferrscidx > IPA_XFER_RSC_IDX_MAX) {
+ IPAERR("Bad parameters.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ if (xferrscidx_valid) {
+ ep->chan_scratch.xdci.xferrscidx = xferrscidx;
+ gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+ ep->chan_scratch);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error writing channel scratch: %d\n", gsi_res);
+ goto write_chan_scratch_fail;
+ }
+ }
+ gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error starting channel: %d\n", gsi_res);
+ goto write_chan_scratch_fail;
+ }
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("exit\n");
+ return 0;
+
+write_chan_scratch_fail:
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ return result;
+}
+
+static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+ unsigned long chan_hdl)
+{
+ enum gsi_status gsi_res;
+
+ memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
+ gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error querying channel info: %d\n", gsi_res);
+ return -EFAULT;
+ }
+ if (!gsi_chan_info->evt_valid) {
+ IPAERR("Event info invalid\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
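+/*
+ * An xDCI channel is considered empty when HW has consumed everything that
+ * was submitted to it:
+ * - UL (producer) channel: the channel read pointer equals the write
+ *   pointer.
+ * - DL (consumer) channel: if the write pointer is not on the final link
+ *   TRB, the event ring read pointer must have caught up with it;
+ *   otherwise (write pointer on the link TRB) the event ring read pointer
+ *   must have wrapped back to the channel ring base.
+ */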
+static bool ipa3_is_xdci_channel_with_given_info_empty(
+ struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info)
+{
+ bool is_empty = false;
+
+ if (!IPA_CLIENT_IS_CONS(ep->client)) {
+ /* For UL channel: chan.RP == chan.WP */
+ is_empty = (chan_info->rp == chan_info->wp);
+ } else {
+ /* For DL channel: */
+ if (chan_info->wp !=
+ (ep->gsi_mem_info.chan_ring_base_addr +
+ ep->gsi_mem_info.chan_ring_len -
+ GSI_CHAN_RE_SIZE_16B)) {
+ /* if chan.WP != LINK TRB: chan.WP == evt.RP */
+ is_empty = (chan_info->wp == chan_info->evt_rp);
+ } else {
+ /*
+ * if chan.WP == LINK TRB: chan.base_xfer_ring_addr
+ * == evt.RP
+ */
+ is_empty = (ep->gsi_mem_info.chan_ring_base_addr ==
+ chan_info->evt_rp);
+ }
+ }
+
+ return is_empty;
+}
+
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+ bool *is_empty)
+{
+ struct gsi_chan_info chan_info;
+ int res;
+
+ if (!ep || !is_empty || !ep->valid) {
+ IPAERR("Input Error\n");
+ return -EFAULT;
+ }
+
+ res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
+ if (res) {
+ IPAERR("Failed to get GSI channel info\n");
+ return -EFAULT;
+ }
+
+ *is_empty = ipa3_is_xdci_channel_with_given_info_empty(ep, &chan_info);
+
+ return 0;
+}
+
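+/*
+ * Request (over QMI) that the modem force-clear the data path for the pipes
+ * in @source_pipe_bitmask, optionally throttling the source as well.
+ */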
+static int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
+ u32 source_pipe_bitmask)
+{
+ struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+ int result;
+
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ req.source_pipe_bitmask = source_pipe_bitmask;
+ if (throttle_source) {
+ req.throttle_source_valid = 1;
+ req.throttle_source = 1;
+ }
+ result = ipa3_qmi_enable_force_clear_datapath_send(&req);
+ if (result) {
+ IPAERR("ipa3_qmi_enable_force_clear_datapath_send failed %d\n",
+ result);
+ return result;
+ }
+
+ return 0;
+}
+
+static int ipa3_disable_force_clear(u32 request_id)
+{
+ struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+ int result;
+
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ result = ipa3_qmi_disable_force_clear_datapath_send(&req);
+ if (result) {
+ IPAERR("ipa3_qmi_disable_force_clear_datapath_send failed %d\n",
+ result);
+ return result;
+ }
+
+ return 0;
+}
+
+/* Clocks should be voted for before invoking this function */
+static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc)
+{
+ int res;
+
+ IPADBG("entry\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+ !stop_in_proc) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ res = ipa3_stop_gsi_channel(clnt_hdl);
+ if (res != 0 && res != -GSI_STATUS_AGAIN &&
+ res != -GSI_STATUS_TIMED_OUT) {
+ IPAERR("xDCI stop channel failed res=%d\n", res);
+ return -EFAULT;
+ }
+
+	*stop_in_proc = (res != 0);
+
+ IPADBG("xDCI channel is %s (result=%d)\n",
+ res ? "STOP_IN_PROC/TimeOut" : "STOP", res);
+
+ IPADBG("exit\n");
+ return 0;
+}
+
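+/*
+ * Repeatedly attempt to stop an xDCI GSI channel, delaying between
+ * attempts, until either the channel stops or
+ * IPA_CHANNEL_STOP_IN_PROC_TO_MSEC elapses; *stop_in_proc reports whether
+ * the stop is still in progress.
+ */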
+/* Clocks should be voted for before invoking this function */
+static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
+ bool *stop_in_proc)
+{
+ unsigned long jiffies_start;
+ unsigned long jiffies_timeout =
+ msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC);
+ int res;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+ !stop_in_proc) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ jiffies_start = jiffies;
+ while (1) {
+ res = ipa3_xdci_stop_gsi_channel(clnt_hdl,
+ stop_in_proc);
+ if (res) {
+ IPAERR("failed to stop xDCI channel hdl=%d\n",
+ clnt_hdl);
+ return res;
+ }
+
+ if (!*stop_in_proc) {
+ IPADBG("xDCI channel STOP hdl=%d\n", clnt_hdl);
+ return res;
+ }
+
+		/*
+		 * Give the previous stop request a chance to complete
+		 * before retrying
+		 */
+ udelay(IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC);
+
+ if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
+ IPADBG("timeout waiting for xDCI channel emptiness\n");
+ return res;
+ }
+ }
+}
+
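+/*
+ * Stop an xDCI UL channel while draining in-flight data:
+ * 1. Try to stop the channel (with retries).
+ * 2. If the stop is still in progress, poll for channel emptiness and,
+ *    once empty, retry the stop.
+ * 3. If still not stopped and @should_force_clear is set, request a QMI
+ *    force clear, poll for emptiness again and attempt one final stop.
+ */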
+/* Clocks should be voted for before invoking this function */
+static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
+ u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl)
+{
+ int result;
+ bool is_empty = false;
+ int i;
+ bool stop_in_proc;
+ struct ipa3_ep_context *ep;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ /* first try to stop the channel */
+ result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+ &stop_in_proc);
+ if (result) {
+ IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ goto exit;
+ }
+ if (!stop_in_proc)
+ goto exit;
+
+	/* if the stop is still in progress, wait for the channel to empty */
+ for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+ result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+ if (result)
+ goto exit;
+ if (is_empty)
+ break;
+ udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+ }
+	/* if the channel is empty, try to stop it again */
+ if (is_empty) {
+ result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+ &stop_in_proc);
+ if (result) {
+ IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ goto exit;
+ }
+ if (!stop_in_proc)
+ goto exit;
+ }
+	/* if the stop is still in progress or data remains, force clear */
+ if (should_force_clear) {
+ result = ipa3_enable_force_clear(qmi_req_id, false,
+ source_pipe_bitmask);
+ if (result)
+ goto exit;
+ }
+	/* after force clear, wait for the channel to empty */
+ for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+ result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+ if (result)
+ goto disable_force_clear_and_exit;
+ if (is_empty)
+ break;
+
+ udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+ }
+ /* try to stop for the last time */
+ result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+ &stop_in_proc);
+ if (result) {
+ IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ goto disable_force_clear_and_exit;
+ }
+ result = stop_in_proc ? -EFAULT : 0;
+
+disable_force_clear_and_exit:
+ if (should_force_clear)
+ ipa3_disable_force_clear(qmi_req_id);
+exit:
+ return result;
+}
+
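+/**
+ * ipa3_xdci_disconnect() - Tear down an xDCI pipe: disable its data path,
+ * stop the GSI channel (draining UL data first for producer pipes) and drop
+ * the IPA clock vote taken at connect time.
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @should_force_clear: [in] whether QMI force clear may be used to drain UL
+ * @qmi_req_id: [in] request ID to use for the QMI force clear messages
+ *
+ * Returns: 0 on success, negative on failure
+ */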
+int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
+{
+ struct ipa3_ep_context *ep;
+ int result;
+ u32 source_pipe_bitmask = 0;
+
+ IPADBG("entry\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipa3_disable_data_path(clnt_hdl);
+
+ if (!IPA_CLIENT_IS_CONS(ep->client)) {
+ IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ source_pipe_bitmask = 1 <<
+ ipa3_get_ep_mapping(ep->client);
+ result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+ source_pipe_bitmask, should_force_clear, clnt_hdl);
+ if (result) {
+ IPAERR("Fail to stop UL channel with data drain\n");
+ WARN_ON(1);
+ goto stop_chan_fail;
+ }
+ } else {
+ IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ result = ipa3_stop_gsi_channel(clnt_hdl);
+ if (result) {
+ IPAERR("Error stopping channel (CONS client): %d\n",
+ result);
+ goto stop_chan_fail;
+ }
+ }
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("exit\n");
+ return 0;
+
+stop_chan_fail:
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ return result;
+}
+
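+/**
+ * ipa3_release_gsi_channel() - Deallocate the GSI channel and event ring of
+ * a previously connected endpoint and clear its EP context.
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ */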
+int ipa3_release_gsi_channel(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+ int result = -EFAULT;
+ enum gsi_status gsi_res;
+
+ IPADBG("entry\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error deallocating channel: %d\n", gsi_res);
+ goto dealloc_chan_fail;
+ }
+
+ gsi_res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error deallocating event: %d\n", gsi_res);
+ goto dealloc_chan_fail;
+ }
+
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client))
+ ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+
+ IPADBG("exit\n");
+ return 0;
+
+dealloc_chan_fail:
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ return result;
+}
+
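+/**
+ * ipa3_xdci_suspend() - Suspend an xDCI UL/DL channel pair (or a lone DPL
+ * channel): poll until both channels are empty, verify no aggregation
+ * frame is open on the DL/DPL pipe, suspend the DL/DPL EP, re-check
+ * emptiness and finally stop the UL channel with data drain. Pending
+ * DL/DPL data aborts the suspend.
+ * @ul_clnt_hdl: [in] opaque client handle of the UL pipe (ignored for DPL)
+ * @dl_clnt_hdl: [in] opaque client handle of the DL or DPL pipe
+ * @should_force_clear: [in] whether QMI force clear may be used for UL drain
+ * @qmi_req_id: [in] request ID to use for the QMI force clear messages
+ * @is_dpl: [in] true when suspending a single DPL channel
+ *
+ * Returns: 0 on success, negative on failure
+ */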
+int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ bool should_force_clear, u32 qmi_req_id, bool is_dpl)
+{
+ struct ipa3_ep_context *ul_ep, *dl_ep;
+ int result = -EFAULT;
+ u32 source_pipe_bitmask = 0;
+ bool dl_data_pending = true;
+ bool ul_data_pending = true;
+ int i;
+ bool is_empty = false;
+ struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info;
+ int aggr_active_bitmap = 0;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+ /* In case of DPL, dl is the DPL channel/client */
+
+ IPADBG("entry\n");
+ if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+ (!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
+ if (!is_dpl)
+ ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+ result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info,
+ dl_ep->gsi_chan_hdl);
+ if (result)
+ goto disable_clk_and_exit;
+
+ if (!is_dpl) {
+ result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
+ ul_ep->gsi_chan_hdl);
+ if (result)
+ goto disable_clk_and_exit;
+ }
+
+ for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+ if (!dl_data_pending && !ul_data_pending)
+ break;
+ result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+ if (result)
+ goto disable_clk_and_exit;
+ if (!is_empty) {
+ dl_data_pending = true;
+ break;
+ }
+ dl_data_pending = false;
+ if (!is_dpl) {
+ result = ipa3_is_xdci_channel_empty(ul_ep, &is_empty);
+ if (result)
+ goto disable_clk_and_exit;
+ ul_data_pending = !is_empty;
+ } else {
+ ul_data_pending = false;
+ }
+
+ udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+ }
+
+ if (!dl_data_pending) {
+ aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+ if (aggr_active_bitmap & (1 << dl_clnt_hdl)) {
+ IPADBG("DL/DPL data pending due to open aggr. frame\n");
+ dl_data_pending = true;
+ }
+ }
+ if (dl_data_pending) {
+ IPAERR("DL/DPL data pending, can't suspend\n");
+ result = -EFAULT;
+ goto disable_clk_and_exit;
+ }
+
+ /* Suspend the DL/DPL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+
+	/*
+	 * Check again whether the DL/DPL channel is empty; data could have
+	 * entered the channel before its IPA EP was suspended
+	 */
+ result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+ if (result)
+ goto unsuspend_dl_and_exit;
+ if (!is_empty) {
+ IPAERR("DL/DPL data pending, can't suspend\n");
+ result = -EFAULT;
+ goto unsuspend_dl_and_exit;
+ }
+
+ /* STOP UL channel */
+ if (!is_dpl) {
+ source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
+ result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+ source_pipe_bitmask, should_force_clear, ul_clnt_hdl);
+ if (result) {
+ IPAERR("Error stopping UL channel: result = %d\n",
+ result);
+ goto unsuspend_dl_and_exit;
+ }
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+ IPADBG("exit\n");
+ return 0;
+
+unsuspend_dl_and_exit:
+ /* Unsuspend the DL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+disable_clk_and_exit:
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+ return result;
+}
+
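+/**
+ * ipa3_start_gsi_channel() - Start the GSI channel of a connected endpoint.
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ */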
+int ipa3_start_gsi_channel(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+ int result = -EFAULT;
+ enum gsi_status gsi_res;
+
+ IPADBG("entry\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("Bad parameters.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error starting channel: %d\n", gsi_res);
+ goto start_chan_fail;
+ }
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("exit\n");
+ return 0;
+
+start_chan_fail:
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ return result;
+}
+
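+/**
+ * ipa3_xdci_resume() - Resume a previously suspended xDCI channel pair (or
+ * a lone DPL channel): unsuspend the DL/DPL EP and restart the UL channel.
+ * @ul_clnt_hdl: [in] opaque client handle of the UL pipe (ignored for DPL)
+ * @dl_clnt_hdl: [in] opaque client handle of the DL or DPL pipe
+ * @is_dpl: [in] true when resuming a single DPL channel
+ *
+ * Returns: 0 (a UL channel start failure is only logged)
+ */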
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
+{
+ struct ipa3_ep_context *ul_ep, *dl_ep;
+ enum gsi_status gsi_res;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+ /* In case of DPL, dl is the DPL channel/client */
+
+ IPADBG("entry\n");
+ if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+ (!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
+ if (!is_dpl)
+ ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+ /* Unsuspend the DL/DPL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+
+ /* Start UL channel */
+ if (!is_dpl) {
+ gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS)
+ IPAERR("Error starting UL channel: %d\n", gsi_res);
+ }
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+ IPADBG("exit\n");
+ return 0;
+}
+/**
+ * ipa3_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before
+ * client disconnect.
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to remove
+ * ep delay on the IPA consumer pipe before disconnect in BAM-BAM mode. This
+ * API expects the caller to take responsibility for freeing any needed
+ * headers, routing and filtering tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_clear_endpoint_delay(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+ struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+ struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0};
+ int res;
+
+ if (unlikely(!ipa3_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (!ipa3_ctx->tethered_flow_control) {
+ IPADBG("APPS flow control is not enabled\n");
+ /* Send a message to modem to disable flow control honoring. */
+ req.request_id = clnt_hdl;
+ req.source_pipe_bitmask = 1 << clnt_hdl;
+ res = ipa3_qmi_enable_force_clear_datapath_send(&req);
+ if (res) {
+ IPADBG("enable_force_clear_datapath failed %d\n",
+ res);
+ }
+ ep->qmi_request_sent = true;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	/*
+	 * Set the disconnect-in-progress flag so that further flow control
+	 * events are not honored.
+	 */
+ spin_lock(&ipa3_ctx->disconnect_lock);
+ ep->disconnect_in_progress = true;
+ spin_unlock(&ipa3_ctx->disconnect_lock);
+
+	/* If flow is disabled at this point, restore the ep state. */
+ ep_ctrl.ipa_ep_delay = false;
+ ep_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
+
+ return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
new file mode 100644
index 0000000..2368797
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -0,0 +1,2143 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_MAX_MSG_LEN 4096
+#define IPA_DBG_MAX_RULE_IN_TBL 128
+#define IPA_DBG_ACTIVE_CLIENT_BUF_SIZE ((IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN \
+ * IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) + IPA_MAX_MSG_LEN)
+
+#define IPA_DUMP_STATUS_FIELD(f) \
+ pr_err(#f "=0x%x\n", status->f)
+
+const char *ipa3_excp_name[] = {
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
+};
+
+const char *ipa3_event_name[] = {
+ __stringify(WLAN_CLIENT_CONNECT),
+ __stringify(WLAN_CLIENT_DISCONNECT),
+ __stringify(WLAN_CLIENT_POWER_SAVE_MODE),
+ __stringify(WLAN_CLIENT_NORMAL_MODE),
+ __stringify(SW_ROUTING_ENABLE),
+ __stringify(SW_ROUTING_DISABLE),
+ __stringify(WLAN_AP_CONNECT),
+ __stringify(WLAN_AP_DISCONNECT),
+ __stringify(WLAN_STA_CONNECT),
+ __stringify(WLAN_STA_DISCONNECT),
+ __stringify(WLAN_CLIENT_CONNECT_EX),
+ __stringify(WLAN_SWITCH_TO_SCC),
+ __stringify(WLAN_SWITCH_TO_MCC),
+ __stringify(WLAN_WDI_ENABLE),
+ __stringify(WLAN_WDI_DISABLE),
+ __stringify(WAN_UPSTREAM_ROUTE_ADD),
+ __stringify(WAN_UPSTREAM_ROUTE_DEL),
+ __stringify(WAN_EMBMS_CONNECT),
+ __stringify(WAN_XLAT_CONNECT),
+ __stringify(ECM_CONNECT),
+ __stringify(ECM_DISCONNECT),
+ __stringify(IPA_TETHERING_STATS_UPDATE_STATS),
+ __stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS),
+};
+
+const char *ipa3_hdr_l2_type_name[] = {
+ __stringify(IPA_HDR_L2_NONE),
+ __stringify(IPA_HDR_L2_ETHERNET_II),
+ __stringify(IPA_HDR_L2_802_3),
+};
+
+const char *ipa3_hdr_proc_type_name[] = {
+ __stringify(IPA_HDR_PROC_NONE),
+ __stringify(IPA_HDR_PROC_ETHII_TO_ETHII),
+ __stringify(IPA_HDR_PROC_ETHII_TO_802_3),
+ __stringify(IPA_HDR_PROC_802_3_TO_ETHII),
+ __stringify(IPA_HDR_PROC_802_3_TO_802_3),
+};
+
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_keep_awake;
+static struct dentry *dfile_ep_holb;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_proc_ctx;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip4_rt_hw;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip6_rt_hw;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip4_flt_hw;
+static struct dentry *dfile_ip6_flt;
+static struct dentry *dfile_ip6_flt_hw;
+static struct dentry *dfile_stats;
+static struct dentry *dfile_wstats;
+static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
+static struct dentry *dfile_dbg_cnt;
+static struct dentry *dfile_msg;
+static struct dentry *dfile_ip4_nat;
+static struct dentry *dfile_rm_stats;
+static struct dentry *dfile_status_stats;
+static struct dentry *dfile_active_clients;
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static char *active_clients_buf;
+
+static s8 ep_reg_idx;
+
+static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ struct ipahal_reg_shared_mem_size smem_sz;
+
+ memset(&smem_sz, 0, sizeof(smem_sz));
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_VERSION=0x%x\n"
+ "IPA_COMP_HW_VERSION=0x%x\n"
+ "IPA_ROUTE=0x%x\n"
+ "IPA_SHARED_MEM_RESTRICTED=0x%x\n"
+ "IPA_SHARED_MEM_SIZE=0x%x\n",
+ ipahal_read_reg(IPA_VERSION),
+ ipahal_read_reg(IPA_COMP_HW_VERSION),
+ ipahal_read_reg(IPA_ROUTE),
+ smem_sz.shared_mem_baddr,
+ smem_sz.shared_mem_sz);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
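+/*
+ * Configure head-of-line blocking (HOLB) drop on an endpoint. The input is
+ * three space-separated integers: "<ep_idx> <en> <tmr_val>". For example
+ * (assuming this node is created as "holb" under IPA's debugfs directory):
+ *   echo "5 1 2000" > /sys/kernel/debug/ipa/holb
+ */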
+static ssize_t ipa3_write_ep_holb(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ipa_ep_cfg_holb holb;
+ u32 en;
+ u32 tmr_val;
+ u32 ep_idx;
+ unsigned long missing;
+ char *sptr, *token;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+
+ sptr = dbg_buff;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &ep_idx))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &en))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &tmr_val))
+ return -EINVAL;
+
+ holb.en = en;
+ holb.tmr_val = tmr_val;
+
+ ipa3_cfg_ep_holb(ep_idx, &holb);
+
+ return count;
+}
+
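+/*
+ * Select which endpoint ipa3_read_ep_reg() dumps: write a pipe index, or a
+ * negative value to dump the registers of all pipes.
+ */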
+static ssize_t ipa3_write_ep_reg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+	if (option >= (int)ipa3_ctx->ipa_num_pipes) {
+		IPAERR("bad pipe specified %d\n", option);
+ return count;
+ }
+
+ ep_reg_idx = option;
+
+ return count;
+}
+
+/**
+ * _ipa_read_ep_reg_v3_0() - Format one endpoint's configuration registers
+ * into the given buffer
+ *
+ * Returns the number of characters printed
+ */
+int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe)
+{
+	return scnprintf(
+		buf, max_len,
+ "IPA_ENDP_INIT_NAT_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
+ "IPA_ENDP_INIT_MODE_%u=0x%x\n"
+ "IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_ROUTE_%u=0x%x\n"
+ "IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
+ "IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_CFG_%u=0x%x\n",
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_ROUTE_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
+}
+
+static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int i;
+ int start_idx;
+ int end_idx;
+ int size = 0;
+ int ret;
+ loff_t pos;
+
+ /* negative ep_reg_idx means all registers */
+ if (ep_reg_idx < 0) {
+ start_idx = 0;
+ end_idx = ipa3_ctx->ipa_num_pipes;
+ } else {
+ start_idx = ep_reg_idx;
+ end_idx = start_idx + 1;
+ }
+ pos = *ppos;
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ for (i = start_idx; i < end_idx; i++) {
+
+ nbytes = ipa3_ctx->ctrl->ipa3_read_ep_reg(dbg_buff,
+ IPA_MAX_MSG_LEN, i);
+
+ *ppos = pos;
+ ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+ nbytes);
+ if (ret < 0) {
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return ret;
+ }
+
+ size += ret;
+ ubuf += nbytes;
+ count -= nbytes;
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ *ppos = pos + size;
+ return size;
+}
+
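+/*
+ * Debug hook for the IPA active-clients vote: write 1 to take a vote (keep
+ * IPA clocks on), 0 to release it. The current power state can be read
+ * back from the same node.
+ */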
+static ssize_t ipa3_write_keep_awake(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ if (option == 1)
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ else if (option == 0)
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ else
+ return -EFAULT;
+
+ return count;
+}
+
+static ssize_t ipa3_read_keep_awake(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+
+ ipa3_active_clients_lock();
+ if (ipa3_ctx->ipa3_active_clients.cnt)
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA APPS power state is ON\n");
+ else
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA APPS power state is OFF\n");
+ ipa3_active_clients_unlock();
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int i = 0;
+ struct ipa3_hdr_entry *entry;
+
+ mutex_lock(&ipa3_ctx->lock);
+
+ if (ipa3_ctx->hdr_tbl_lcl)
+ pr_err("Table resides on local memory\n");
+ else
+ pr_err("Table resides on system (ddr) memory\n");
+
+ list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ nbytes = scnprintf(
+ dbg_buff,
+ IPA_MAX_MSG_LEN,
+ "name:%s len=%d ref=%d partial=%d type=%s ",
+ entry->name,
+ entry->hdr_len,
+ entry->ref_cnt,
+ entry->is_partial,
+ ipa3_hdr_l2_type_name[entry->type]);
+
+ if (entry->is_hdr_proc_ctx) {
+ nbytes += scnprintf(
+ dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "phys_base=0x%pa ",
+ &entry->phys_base);
+ } else {
+ nbytes += scnprintf(
+ dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "ofst=%u ",
+ entry->offset_entry->offset >> 2);
+ }
+ for (i = 0; i < entry->hdr_len; i++) {
+ scnprintf(dbg_buff + nbytes + i * 2,
+ IPA_MAX_MSG_LEN - nbytes - i * 2,
+ "%02x", entry->hdr[i]);
+ }
+ scnprintf(dbg_buff + nbytes + entry->hdr_len * 2,
+ IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2,
+ "\n");
+ pr_err("%s", dbg_buff);
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return 0;
+}
+
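+/*
+ * Print the SW view of a rule's attributes: each bit set in attrib_mask
+ * selects one field to dump.
+ */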
+static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
+ enum ipa_ip_type ip)
+{
+ uint32_t addr[4];
+ uint32_t mask[4];
+ int i;
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+ pr_err("tos_value:%d ", attrib->tos_value);
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+ pr_err("tos_mask:%d ", attrib->tos_mask);
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL)
+ pr_err("protocol:%d ", attrib->u.v4.protocol);
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.src_addr);
+ mask[0] = htonl(attrib->u.v4.src_addr_mask);
+ pr_err(
+ "src_addr:%pI4 src_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.src_addr[i]);
+ mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+ }
+ pr_err(
+ "src_addr:%pI6 src_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.dst_addr);
+ mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+ pr_err(
+ "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+ mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+ }
+ pr_err(
+ "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ pr_err("src_port_range:%u %u ",
+ attrib->src_port_lo,
+ attrib->src_port_hi);
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ pr_err("dst_port_range:%u %u ",
+ attrib->dst_port_lo,
+ attrib->dst_port_hi);
+ }
+ if (attrib->attrib_mask & IPA_FLT_TYPE)
+ pr_err("type:%d ", attrib->type);
+
+ if (attrib->attrib_mask & IPA_FLT_CODE)
+ pr_err("code:%d ", attrib->code);
+
+ if (attrib->attrib_mask & IPA_FLT_SPI)
+ pr_err("spi:%x ", attrib->spi);
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT)
+ pr_err("src_port:%u ", attrib->src_port);
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT)
+ pr_err("dst_port:%u ", attrib->dst_port);
+
+ if (attrib->attrib_mask & IPA_FLT_TC)
+ pr_err("tc:%d ", attrib->u.v6.tc);
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL)
+ pr_err("flow_label:%x ", attrib->u.v6.flow_label);
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR)
+ pr_err("next_hdr:%d ", attrib->u.v6.next_hdr);
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ pr_err(
+ "metadata:%x metadata_mask:%x ",
+ attrib->meta_data, attrib->meta_data_mask);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+ pr_err("frg ");
+
+ if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
+ (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) {
+ pr_err("src_mac_addr:%pM ", attrib->src_mac_addr);
+ }
+
+ if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
+ (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) {
+ pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
+ pr_err("ether_type:%x ", attrib->ether_type);
+
+ pr_err("\n");
+ return 0;
+}
+
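+/*
+ * Print a rule's HW equation attributes - the equation form that is
+ * actually programmed into IPA HW.
+ */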
+static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
+{
+ uint8_t addr[16];
+ uint8_t mask[16];
+ int i;
+ int j;
+
+ if (attrib->tos_eq_present)
+ pr_err("tos_value:%d ", attrib->tos_eq);
+
+ if (attrib->protocol_eq_present)
+ pr_err("protocol:%d ", attrib->protocol_eq);
+
+ if (attrib->tc_eq_present)
+ pr_err("tc:%d ", attrib->tc_eq);
+
+ for (i = 0; i < attrib->num_offset_meq_128; i++) {
+ for (j = 0; j < 16; j++) {
+ addr[j] = attrib->offset_meq_128[i].value[j];
+ mask[j] = attrib->offset_meq_128[i].mask[j];
+ }
+ pr_err(
+ "(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ",
+ attrib->offset_meq_128[i].offset,
+ mask, addr);
+ }
+
+ for (i = 0; i < attrib->num_offset_meq_32; i++)
+ pr_err(
+ "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
+ attrib->offset_meq_32[i].offset,
+ attrib->offset_meq_32[i].mask,
+ attrib->offset_meq_32[i].value);
+
+ for (i = 0; i < attrib->num_ihl_offset_meq_32; i++)
+ pr_err(
+ "(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
+ attrib->ihl_offset_meq_32[i].offset,
+ attrib->ihl_offset_meq_32[i].mask,
+ attrib->ihl_offset_meq_32[i].value);
+
+ if (attrib->metadata_meq32_present)
+ pr_err(
+ "(metadata: ofst:%u mask:0x%x val:0x%x) ",
+ attrib->metadata_meq32.offset,
+ attrib->metadata_meq32.mask,
+ attrib->metadata_meq32.value);
+
+ for (i = 0; i < attrib->num_ihl_offset_range_16; i++)
+ pr_err(
+ "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
+ attrib->ihl_offset_range_16[i].offset,
+ attrib->ihl_offset_range_16[i].range_low,
+ attrib->ihl_offset_range_16[i].range_high);
+
+ if (attrib->ihl_offset_eq_32_present)
+ pr_err(
+ "(ihl_ofst_eq32:%d val:0x%x) ",
+ attrib->ihl_offset_eq_32.offset,
+ attrib->ihl_offset_eq_32.value);
+
+ if (attrib->ihl_offset_eq_16_present)
+ pr_err(
+ "(ihl_ofst_eq16:%d val:0x%x) ",
+ attrib->ihl_offset_eq_16.offset,
+ attrib->ihl_offset_eq_16.value);
+
+ if (attrib->fl_eq_present)
+ pr_err("flow_label:%d ", attrib->fl_eq);
+
+ if (attrib->ipv4_frag_eq_present)
+ pr_err("frag ");
+
+ pr_err("\n");
+ return 0;
+}
+
+static int ipa3_open_dbg(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int i = 0;
+ struct ipa3_rt_tbl *tbl;
+ struct ipa3_rt_entry *entry;
+ struct ipa3_rt_tbl_set *set;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ u32 ofst;
+ u32 ofst_words;
+
+ set = &ipa3_ctx->rt_tbl_set[ip];
+
+ mutex_lock(&ipa3_ctx->lock);
+
+ if (ip == IPA_IP_v6) {
+ if (ipa3_ctx->ip6_rt_tbl_hash_lcl)
+ pr_err("Hashable table resides on local memory\n");
+ else
+ pr_err("Hashable table resides on system (ddr) memory\n");
+ if (ipa3_ctx->ip6_rt_tbl_nhash_lcl)
+ pr_err("Non-Hashable table resides on local memory\n");
+ else
+ pr_err("Non-Hashable table resides on system (ddr) memory\n");
+ } else if (ip == IPA_IP_v4) {
+ if (ipa3_ctx->ip4_rt_tbl_hash_lcl)
+ pr_err("Hashable table resides on local memory\n");
+ else
+ pr_err("Hashable table resides on system (ddr) memory\n");
+ if (ipa3_ctx->ip4_rt_tbl_nhash_lcl)
+ pr_err("Non-Hashable table resides on local memory\n");
+ else
+ pr_err("Non-Hashable table resides on system (ddr) memory\n");
+ }
+
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ if (entry->proc_ctx) {
+ ofst = entry->proc_ctx->offset_entry->offset;
+ ofst_words =
+ (ofst +
+ ipa3_ctx->hdr_proc_ctx_tbl.start_offset)
+ >> 5;
+
+ pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+ entry->tbl->idx, entry->tbl->name,
+ entry->tbl->ref_cnt);
+ pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+ i, entry->rule.dst,
+ ipa3_get_ep_mapping(entry->rule.dst),
+ !ipa3_ctx->hdr_proc_ctx_tbl_lcl);
+ pr_err("proc_ctx[32B]:%u attrib_mask:%08x ",
+ ofst_words,
+ entry->rule.attrib.attrib_mask);
+ pr_err("rule_id:%u max_prio:%u prio:%u ",
+ entry->rule_id, entry->rule.max_prio,
+ entry->prio);
+ pr_err("hashable:%u retain_hdr:%u ",
+ entry->rule.hashable,
+ entry->rule.retain_hdr);
+ } else {
+ if (entry->hdr)
+ ofst = entry->hdr->offset_entry->offset;
+ else
+ ofst = 0;
+
+ pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+ entry->tbl->idx, entry->tbl->name,
+ entry->tbl->ref_cnt);
+ pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+ i, entry->rule.dst,
+ ipa3_get_ep_mapping(entry->rule.dst),
+ !ipa3_ctx->hdr_tbl_lcl);
+ pr_err("hdr_ofst[words]:%u attrib_mask:%08x ",
+ ofst >> 2,
+ entry->rule.attrib.attrib_mask);
+ pr_err("rule_id:%u max_prio:%u prio:%u ",
+ entry->rule_id, entry->rule.max_prio,
+ entry->prio);
+ pr_err("hashable:%u retain_hdr:%u ",
+ entry->rule.hashable,
+ entry->rule.retain_hdr);
+ }
+
+ ipa3_attrib_dump(&entry->rule.attrib, ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return 0;
+}
+
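+/*
+ * Dump the routing tables as read back from IPA HW: for each table index,
+ * both the hashable and the non-hashable rule sets are parsed and printed.
+ */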
+static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ int tbls_num;
+ int rules_num;
+ int tbl;
+ int rl;
+ int res = 0;
+ struct ipahal_rt_rule_entry *rules = NULL;
+
+ switch (ip) {
+ case IPA_IP_v4:
+ tbls_num = IPA_MEM_PART(v4_rt_num_index);
+ break;
+ case IPA_IP_v6:
+ tbls_num = IPA_MEM_PART(v6_rt_num_index);
+ break;
+ default:
+ IPAERR("ip type error %d\n", ip);
+ return -EINVAL;
+	}
+
+ IPADBG("Tring to parse %d H/W routing tables - IP=%d\n", tbls_num, ip);
+
+ rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+ if (!rules) {
+ IPAERR("failed to allocate mem for tbl rules\n");
+ return -ENOMEM;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ mutex_lock(&ipa3_ctx->lock);
+
+ for (tbl = 0 ; tbl < tbls_num ; tbl++) {
+ pr_err("=== Routing Table %d = Hashable Rules ===\n", tbl);
+ rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+ res = ipa3_rt_read_tbl_from_hw(tbl, ip, true, rules,
+ &rules_num);
+ if (res) {
+ pr_err("ERROR - Check the logs\n");
+ IPAERR("failed reading tbl from hw\n");
+ goto bail;
+ }
+ if (!rules_num)
+ pr_err("-->No rules. Empty tbl or modem system table\n");
+
+ for (rl = 0 ; rl < rules_num ; rl++) {
+ pr_err("rule_idx:%d dst ep:%d L:%u ",
+ rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl);
+
+ if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+ pr_err("proc_ctx:%u attrib_mask:%08x ",
+ rules[rl].hdr_ofst,
+ rules[rl].eq_attrib.rule_eq_bitmap);
+ else
+ pr_err("hdr_ofst:%u attrib_mask:%08x ",
+ rules[rl].hdr_ofst,
+ rules[rl].eq_attrib.rule_eq_bitmap);
+
+ pr_err("rule_id:%u prio:%u retain_hdr:%u ",
+ rules[rl].id, rules[rl].priority,
+ rules[rl].retain_hdr);
+ ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ }
+
+ pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl);
+ rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+ res = ipa3_rt_read_tbl_from_hw(tbl, ip, false, rules,
+ &rules_num);
+ if (res) {
+ pr_err("ERROR - Check the logs\n");
+ IPAERR("failed reading tbl from hw\n");
+ goto bail;
+ }
+ if (!rules_num)
+ pr_err("-->No rules. Empty tbl or modem system table\n");
+
+ for (rl = 0 ; rl < rules_num ; rl++) {
+ pr_err("rule_idx:%d dst ep:%d L:%u ",
+ rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl);
+
+ if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+ pr_err("proc_ctx:%u attrib_mask:%08x ",
+ rules[rl].hdr_ofst,
+ rules[rl].eq_attrib.rule_eq_bitmap);
+ else
+ pr_err("hdr_ofst:%u attrib_mask:%08x ",
+ rules[rl].hdr_ofst,
+ rules[rl].eq_attrib.rule_eq_bitmap);
+
+ pr_err("rule_id:%u prio:%u retain_hdr:%u\n",
+ rules[rl].id, rules[rl].priority,
+ rules[rl].retain_hdr);
+ ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ }
+ pr_err("\n");
+ }
+
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ kfree(rules);
+ return res;
+}
+
+static ssize_t ipa3_read_proc_ctx(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes = 0;
+ struct ipa3_hdr_proc_ctx_tbl *tbl;
+ struct ipa3_hdr_proc_ctx_entry *entry;
+ u32 ofst_words;
+
+ tbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+
+ mutex_lock(&ipa3_ctx->lock);
+
+ if (ipa3_ctx->hdr_proc_ctx_tbl_lcl)
+ pr_info("Table resides on local memory\n");
+ else
+ pr_info("Table resides on system(ddr) memory\n");
+
+ list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) {
+ ofst_words = (entry->offset_entry->offset +
+ ipa3_ctx->hdr_proc_ctx_tbl.start_offset)
+ >> 5;
+ if (entry->hdr->is_hdr_proc_ctx) {
+ nbytes += scnprintf(dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+ entry->id,
+ ipa3_hdr_proc_type_name[entry->type],
+ ofst_words);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "hdr_phys_base:0x%pa\n",
+ &entry->hdr->phys_base);
+ } else {
+ nbytes += scnprintf(dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+ entry->id,
+ ipa3_hdr_proc_type_name[entry->type],
+ ofst_words);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ IPA_MAX_MSG_LEN - nbytes,
+ "hdr[words]:%u\n",
+ entry->hdr->offset_entry->offset >> 2);
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int i;
+ int j;
+ struct ipa3_flt_tbl *tbl;
+ struct ipa3_flt_entry *entry;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ struct ipa3_rt_tbl *rt_tbl;
+ u32 rt_tbl_idx;
+ u32 bitmap;
+ bool eq;
+
+ mutex_lock(&ipa3_ctx->lock);
+
+ for (j = 0; j < ipa3_ctx->ipa_num_pipes; j++) {
+ if (!ipa_is_ep_support_flt(j))
+ continue;
+ tbl = &ipa3_ctx->flt_tbl[j][ip];
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (entry->rule.eq_attrib_type) {
+ rt_tbl_idx = entry->rule.rt_tbl_idx;
+ bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
+ eq = true;
+ } else {
+ rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl);
+ if (rt_tbl)
+ rt_tbl_idx = rt_tbl->idx;
+ else
+ rt_tbl_idx = ~0;
+ bitmap = entry->rule.attrib.attrib_mask;
+ eq = false;
+ }
+ pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+ j, i, entry->rule.action, rt_tbl_idx);
+ pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ",
+ bitmap, entry->rule.retain_hdr, eq);
+ pr_err("hashable:%u rule_id:%u max_prio:%u prio:%u ",
+ entry->rule.hashable, entry->rule_id,
+ entry->rule.max_prio, entry->prio);
+ if (eq)
+ ipa3_attrib_dump_eq(
+ &entry->rule.eq_attrib);
+ else
+ ipa3_attrib_dump(
+ &entry->rule.attrib, ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return 0;
+}
+
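+/*
+ * Dump the filtering tables as read back from IPA HW: for each pipe that
+ * supports filtering, both the hashable and the non-hashable rule sets are
+ * parsed and printed.
+ */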
+static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int pipe;
+ int rl;
+ int rules_num;
+ struct ipahal_flt_rule_entry *rules;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ u32 rt_tbl_idx;
+ u32 bitmap;
+ int res = 0;
+
+ IPADBG("Tring to parse %d H/W filtering tables - IP=%d\n",
+ ipa3_ctx->ep_flt_num, ip);
+
+ rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+ if (!rules) {
+ IPAERR("failed to allocate mem for tbl rules\n");
+ return -ENOMEM;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ mutex_lock(&ipa3_ctx->lock);
+ for (pipe = 0; pipe < ipa3_ctx->ipa_num_pipes; pipe++) {
+ if (!ipa_is_ep_support_flt(pipe))
+ continue;
+ pr_err("=== Filtering Table ep:%d = Hashable Rules ===\n",
+ pipe);
+ rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+ res = ipa3_flt_read_tbl_from_hw(pipe, ip, true, rules,
+ &rules_num);
+ if (res) {
+ pr_err("ERROR - Check the logs\n");
+ IPAERR("failed reading tbl from hw\n");
+ goto bail;
+ }
+ if (!rules_num)
+ pr_err("-->No rules. Empty tbl or modem sys table\n");
+
+ for (rl = 0; rl < rules_num; rl++) {
+ rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+ bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
+ pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+ pipe, rl, rules[rl].rule.action, rt_tbl_idx);
+ pr_err("attrib_mask:%08x retain_hdr:%d ",
+ bitmap, rules[rl].rule.retain_hdr);
+ pr_err("rule_id:%u prio:%u ",
+ rules[rl].id, rules[rl].priority);
+ ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ }
+
+ pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n",
+ pipe);
+ rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+ res = ipa3_flt_read_tbl_from_hw(pipe, ip, false, rules,
+ &rules_num);
+ if (res) {
+ pr_err("ERROR - Check the logs\n");
+ IPAERR("failed reading tbl from hw\n");
+ goto bail;
+ }
+ if (!rules_num)
+ pr_err("-->No rules. Empty tbl or modem sys table\n");
+ for (rl = 0; rl < rules_num; rl++) {
+ rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+ bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
+ pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+ pipe, rl, rules[rl].rule.action, rt_tbl_idx);
+ pr_err("attrib_mask:%08x retain_hdr:%d ",
+ bitmap, rules[rl].rule.retain_hdr);
+ pr_err("rule_id:%u prio:%u ",
+ rules[rl].id, rules[rl].priority);
+ ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ }
+ pr_err("\n");
+ }
+
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ kfree(rules);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int i;
+ int cnt = 0;
+ uint connect = 0;
+
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++)
+ connect |= (ipa3_ctx->ep[i].valid << i);
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "sw_tx=%u\n"
+ "hw_tx=%u\n"
+ "tx_non_linear=%u\n"
+ "tx_compl=%u\n"
+ "wan_rx=%u\n"
+ "stat_compl=%u\n"
+ "lan_aggr_close=%u\n"
+ "wan_aggr_close=%u\n"
+ "act_clnt=%u\n"
+ "con_clnt_bmap=0x%x\n"
+ "wan_rx_empty=%u\n"
+ "wan_repl_rx_empty=%u\n"
+ "lan_rx_empty=%u\n"
+ "lan_repl_rx_empty=%u\n"
+ "flow_enable=%u\n"
+ "flow_disable=%u\n",
+ ipa3_ctx->stats.tx_sw_pkts,
+ ipa3_ctx->stats.tx_hw_pkts,
+ ipa3_ctx->stats.tx_non_linear,
+ ipa3_ctx->stats.tx_pkts_compl,
+ ipa3_ctx->stats.rx_pkts,
+ ipa3_ctx->stats.stat_compl,
+ ipa3_ctx->stats.aggr_close,
+ ipa3_ctx->stats.wan_aggr_close,
+ ipa3_ctx->ipa3_active_clients.cnt,
+ connect,
+ ipa3_ctx->stats.wan_rx_empty,
+ ipa3_ctx->stats.wan_repl_rx_empty,
+ ipa3_ctx->stats.lan_rx_empty,
+ ipa3_ctx->stats.lan_repl_rx_empty,
+ ipa3_ctx->stats.flow_enable,
+ ipa3_ctx->stats.flow_disable);
+ cnt += nbytes;
+
+ for (i = 0; i < IPAHAL_PKT_STATUS_EXCEPTION_MAX; i++) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "lan_rx_excp[%u:%20s]=%u\n", i,
+ ipahal_pkt_status_exception_str(i),
+ ipa3_ctx->stats.rx_excp_pkts[i]);
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+
+#define HEAD_FRMT_STR "%25s\n"
+#define FRMT_STR "%25s %10u\n"
+#define FRMT_STR1 "%25s %10u\n\n"
+
+ int cnt = 0;
+ int nbytes;
+ int ipa_ep_idx;
+ enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD;
+ struct ipa3_ep_context *ep;
+
+ do {
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:");
+ cnt += nbytes;
+
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+ cnt += nbytes;
+ break;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ if (ep->valid != 1) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+ cnt += nbytes;
+ break;
+ }
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Avail Fifo Desc:",
+ atomic_read(&ep->avail_fifo_desc));
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx Pkts Status Rcvd:",
+ ep->wstats.rx_pkts_status_rcvd);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx DH Processed:",
+ ep->wstats.rx_hd_processed);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail);
+ cnt += nbytes;
+
+ } while (0);
+
+ client = IPA_CLIENT_WLAN1_CONS;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+ "Client IPA_CLIENT_WLAN1_CONS Stats:");
+ cnt += nbytes;
+ while (1) {
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+ cnt += nbytes;
+ goto nxt_clnt_cons;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ if (ep->valid != 1) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+ cnt += nbytes;
+ goto nxt_clnt_cons;
+ }
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ FRMT_STR1, "Tx Pkts Dropped:",
+ ep->wstats.tx_pkts_dropped);
+ cnt += nbytes;
+
+nxt_clnt_cons:
+ switch (client) {
+ case IPA_CLIENT_WLAN1_CONS:
+ client = IPA_CLIENT_WLAN2_CONS;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+ "Client IPA_CLIENT_WLAN2_CONS Stats:");
+ cnt += nbytes;
+ continue;
+ case IPA_CLIENT_WLAN2_CONS:
+ client = IPA_CLIENT_WLAN3_CONS;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+ "Client IPA_CLIENT_WLAN3_CONS Stats:");
+ cnt += nbytes;
+ continue;
+ case IPA_CLIENT_WLAN3_CONS:
+ client = IPA_CLIENT_WLAN4_CONS;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+ "Client IPA_CLIENT_WLAN4_CONS Stats:");
+ cnt += nbytes;
+ continue;
+ case IPA_CLIENT_WLAN4_CONS:
+ default:
+ break;
+ }
+ break;
+ }
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:");
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+ "Tx Comm Buff Allocated:",
+ ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+ "Tx Comm Buff Avail:", ipa3_ctx->wc_memb.wlan_comm_free_cnt);
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1,
+ "Total Tx Pkts Freed:", ipa3_ctx->wc_memb.total_tx_pkts_freed);
+ cnt += nbytes;
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ struct Ipa3HwStatsNTNInfoData_t stats;
+ int nbytes;
+ int cnt = 0;
+
+ if (!ipa3_get_ntn_stats(&stats)) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "TX num_pkts_processed=%u\n"
+ "TX tail_ptr_val=%u\n"
+ "TX num_db_fired=%u\n"
+ "TX ringFull=%u\n"
+ "TX ringEmpty=%u\n"
+ "TX ringUsageHigh=%u\n"
+ "TX ringUsageLow=%u\n"
+ "TX RingUtilCount=%u\n"
+ "TX bamFifoFull=%u\n"
+ "TX bamFifoEmpty=%u\n"
+ "TX bamFifoUsageHigh=%u\n"
+ "TX bamFifoUsageLow=%u\n"
+ "TX bamUtilCount=%u\n"
+ "TX num_db=%u\n"
+ "TX num_unexpected_db=%u\n"
+ "TX num_bam_int_handled=%u\n"
+ "TX num_bam_int_in_non_running_state=%u\n"
+ "TX num_qmb_int_handled=%u\n"
+ "TX num_bam_int_handled_while_wait_for_bam=%u\n"
+ "TX num_bam_int_handled_while_not_in_bam=%u\n",
+ TX_STATS(num_pkts_processed),
+ TX_STATS(tail_ptr_val),
+ TX_STATS(num_db_fired),
+ TX_STATS(tx_comp_ring_stats.ringFull),
+ TX_STATS(tx_comp_ring_stats.ringEmpty),
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+ TX_STATS(tx_comp_ring_stats.ringUsageLow),
+ TX_STATS(tx_comp_ring_stats.RingUtilCount),
+ TX_STATS(bam_stats.bamFifoFull),
+ TX_STATS(bam_stats.bamFifoEmpty),
+ TX_STATS(bam_stats.bamFifoUsageHigh),
+ TX_STATS(bam_stats.bamFifoUsageLow),
+ TX_STATS(bam_stats.bamUtilCount),
+ TX_STATS(num_db),
+ TX_STATS(num_unexpected_db),
+ TX_STATS(num_bam_int_handled),
+ TX_STATS(num_bam_int_in_non_running_state),
+ TX_STATS(num_qmb_int_handled),
+ TX_STATS(num_bam_int_handled_while_wait_for_bam),
+ TX_STATS(num_bam_int_handled_while_not_in_bam));
+ cnt += nbytes;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "RX max_outstanding_pkts=%u\n"
+ "RX num_pkts_processed=%u\n"
+ "RX rx_ring_rp_value=%u\n"
+ "RX ringFull=%u\n"
+ "RX ringEmpty=%u\n"
+ "RX ringUsageHigh=%u\n"
+ "RX ringUsageLow=%u\n"
+ "RX RingUtilCount=%u\n"
+ "RX bamFifoFull=%u\n"
+ "RX bamFifoEmpty=%u\n"
+ "RX bamFifoUsageHigh=%u\n"
+ "RX bamFifoUsageLow=%u\n"
+ "RX bamUtilCount=%u\n"
+ "RX num_bam_int_handled=%u\n"
+ "RX num_db=%u\n"
+ "RX num_unexpected_db=%u\n"
+ "RX num_pkts_in_dis_uninit_state=%u\n"
+ "num_ic_inj_vdev_change=%u\n"
+ "num_ic_inj_fw_desc_change=%u\n",
+ RX_STATS(max_outstanding_pkts),
+ RX_STATS(num_pkts_processed),
+ RX_STATS(rx_ring_rp_value),
+ RX_STATS(rx_ind_ring_stats.ringFull),
+ RX_STATS(rx_ind_ring_stats.ringEmpty),
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+ RX_STATS(rx_ind_ring_stats.ringUsageLow),
+ RX_STATS(rx_ind_ring_stats.RingUtilCount),
+ RX_STATS(bam_stats.bamFifoFull),
+ RX_STATS(bam_stats.bamFifoEmpty),
+ RX_STATS(bam_stats.bamFifoUsageHigh),
+ RX_STATS(bam_stats.bamFifoUsageLow),
+ RX_STATS(bam_stats.bamUtilCount),
+ RX_STATS(num_bam_int_handled),
+ RX_STATS(num_db),
+ RX_STATS(num_unexpected_db),
+ RX_STATS(num_pkts_in_dis_uninit_state),
+ RX_STATS(num_bam_int_handled_while_not_in_bam),
+ RX_STATS(num_bam_int_handled_while_in_bam_state));
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Fail to read NTN stats\n");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct IpaHwStatsWDIInfoData_t stats;
+ int nbytes;
+ int cnt = 0;
+
+ if (!ipa3_get_wdi_stats(&stats)) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "TX num_pkts_processed=%u\n"
+ "TX copy_engine_doorbell_value=%u\n"
+ "TX num_db_fired=%u\n"
+ "TX ringFull=%u\n"
+ "TX ringEmpty=%u\n"
+ "TX ringUsageHigh=%u\n"
+ "TX ringUsageLow=%u\n"
+ "TX RingUtilCount=%u\n"
+ "TX bamFifoFull=%u\n"
+ "TX bamFifoEmpty=%u\n"
+ "TX bamFifoUsageHigh=%u\n"
+ "TX bamFifoUsageLow=%u\n"
+ "TX bamUtilCount=%u\n"
+ "TX num_db=%u\n"
+ "TX num_unexpected_db=%u\n"
+ "TX num_bam_int_handled=%u\n"
+ "TX num_bam_int_in_non_running_state=%u\n"
+ "TX num_qmb_int_handled=%u\n"
+ "TX num_bam_int_handled_while_wait_for_bam=%u\n",
+ stats.tx_ch_stats.num_pkts_processed,
+ stats.tx_ch_stats.copy_engine_doorbell_value,
+ stats.tx_ch_stats.num_db_fired,
+ stats.tx_ch_stats.tx_comp_ring_stats.ringFull,
+ stats.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
+ stats.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
+ stats.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
+ stats.tx_ch_stats.tx_comp_ring_stats.RingUtilCount,
+ stats.tx_ch_stats.bam_stats.bamFifoFull,
+ stats.tx_ch_stats.bam_stats.bamFifoEmpty,
+ stats.tx_ch_stats.bam_stats.bamFifoUsageHigh,
+ stats.tx_ch_stats.bam_stats.bamFifoUsageLow,
+ stats.tx_ch_stats.bam_stats.bamUtilCount,
+ stats.tx_ch_stats.num_db,
+ stats.tx_ch_stats.num_unexpected_db,
+ stats.tx_ch_stats.num_bam_int_handled,
+ stats.tx_ch_stats.num_bam_int_in_non_running_state,
+ stats.tx_ch_stats.num_qmb_int_handled,
+ stats.tx_ch_stats.
+ num_bam_int_handled_while_wait_for_bam);
+ cnt += nbytes;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "RX max_outstanding_pkts=%u\n"
+ "RX num_pkts_processed=%u\n"
+ "RX rx_ring_rp_value=%u\n"
+ "RX ringFull=%u\n"
+ "RX ringEmpty=%u\n"
+ "RX ringUsageHigh=%u\n"
+ "RX ringUsageLow=%u\n"
+ "RX RingUtilCount=%u\n"
+ "RX bamFifoFull=%u\n"
+ "RX bamFifoEmpty=%u\n"
+ "RX bamFifoUsageHigh=%u\n"
+ "RX bamFifoUsageLow=%u\n"
+ "RX bamUtilCount=%u\n"
+ "RX num_bam_int_handled=%u\n"
+ "RX num_db=%u\n"
+ "RX num_unexpected_db=%u\n"
+ "RX num_pkts_in_dis_uninit_state=%u\n"
+ "num_ic_inj_vdev_change=%u\n"
+ "num_ic_inj_fw_desc_change=%u\n"
+ "RX reserved1=%u\n"
+ "RX reserved2=%u\n",
+ stats.rx_ch_stats.max_outstanding_pkts,
+ stats.rx_ch_stats.num_pkts_processed,
+ stats.rx_ch_stats.rx_ring_rp_value,
+ stats.rx_ch_stats.rx_ind_ring_stats.ringFull,
+ stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
+ stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
+ stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
+ stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount,
+ stats.rx_ch_stats.bam_stats.bamFifoFull,
+ stats.rx_ch_stats.bam_stats.bamFifoEmpty,
+ stats.rx_ch_stats.bam_stats.bamFifoUsageHigh,
+ stats.rx_ch_stats.bam_stats.bamFifoUsageLow,
+ stats.rx_ch_stats.bam_stats.bamUtilCount,
+ stats.rx_ch_stats.num_bam_int_handled,
+ stats.rx_ch_stats.num_db,
+ stats.rx_ch_stats.num_unexpected_db,
+ stats.rx_ch_stats.num_pkts_in_dis_uninit_state,
+ stats.rx_ch_stats.num_ic_inj_vdev_change,
+ stats.rx_ch_stats.num_ic_inj_fw_desc_change,
+ stats.rx_ch_stats.reserved1,
+ stats.rx_ch_stats.reserved2);
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Fail to read WDI stats\n");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
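+/*
+ * Control HW debug counter 0 (general type, all source pipes): write 1 to
+ * enable it, any other value to disable it.
+ */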
+static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ u32 option = 0;
+ struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtou32(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ memset(&dbg_cnt_ctrl, 0, sizeof(dbg_cnt_ctrl));
+ dbg_cnt_ctrl.type = DBG_CNT_TYPE_GENERAL;
+ dbg_cnt_ctrl.product = true;
+ dbg_cnt_ctrl.src_pipe = 0xff;
+ dbg_cnt_ctrl.rule_idx_pipe_rule = false;
+ dbg_cnt_ctrl.rule_idx = 0;
+ if (option == 1)
+ dbg_cnt_ctrl.en = true;
+ else
+ dbg_cnt_ctrl.en = false;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg_cnt_ctrl);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return count;
+}
+
+static ssize_t ipa3_read_dbg_cnt(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ u32 regval;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ regval =
+ ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0);
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_msg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < IPA_EVENT_MAX_NUM; i++) {
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "msg[%u:%27s] W:%u R:%u\n", i,
+ ipa3_event_name[i],
+ ipa3_ctx->stats.msg_w[i],
+ ipa3_ctx->stats.msg_r[i]);
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_nat4(struct file *file,
+ char __user *ubuf, size_t count,
+ loff_t *ppos) {
+
+#define ENTRY_U32_FIELDS 8
+#define NAT_ENTRY_ENABLE 0x8000
+#define NAT_ENTRY_RST_FIN_BIT 0x4000
+#define BASE_TABLE 0
+#define EXPANSION_TABLE 1
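+
+/*
+ * Each IPv4 NAT entry spans ENTRY_U32_FIELDS (eight) u32 words; the word
+ * order assumed by the dump below is: private IP, target IP,
+ * next-index/public-port, private-port/target-port,
+ * IP-checksum-delta/flags, timestamp/protocol,
+ * prev-index/index-table-entry, TCP/UDP-checksum-delta.
+ */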
+
+ u32 *base_tbl, *indx_tbl;
+ u32 tbl_size, *tmp;
+ u32 value, i, j, rule_id;
+ u16 enable, tbl_entry, flag;
+	u32 no_entries = 0;
+
+ value = ipa3_ctx->nat_mem.public_ip_addr;
+ pr_err(
+ "Table IP Address:%d.%d.%d.%d\n",
+ ((value & 0xFF000000) >> 24),
+ ((value & 0x00FF0000) >> 16),
+ ((value & 0x0000FF00) >> 8),
+ ((value & 0x000000FF)));
+
+ pr_err("Table Size:%d\n",
+ ipa3_ctx->nat_mem.size_base_tables);
+
+ pr_err("Expansion Table Size:%d\n",
+ ipa3_ctx->nat_mem.size_expansion_tables-1);
+
+ if (!ipa3_ctx->nat_mem.is_sys_mem)
+ pr_err("Not supported for local(shared) memory\n");
+
+ /* Print Base tables */
+ rule_id = 0;
+ for (j = 0; j < 2; j++) {
+ if (j == BASE_TABLE) {
+ tbl_size = ipa3_ctx->nat_mem.size_base_tables;
+ base_tbl = (u32 *)ipa3_ctx->nat_mem.ipv4_rules_addr;
+
+ pr_err("\nBase Table:\n");
+ } else {
+ tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1;
+ base_tbl =
+ (u32 *)ipa3_ctx->nat_mem.ipv4_expansion_rules_addr;
+
+ pr_err("\nExpansion Base Table:\n");
+ }
+
+ if (base_tbl != NULL) {
+ for (i = 0; i <= tbl_size; i++, rule_id++) {
+ tmp = base_tbl;
+ value = tmp[4];
+ enable = ((value & 0xFFFF0000) >> 16);
+
+ if (enable & NAT_ENTRY_ENABLE) {
+					no_entries++;
+ pr_err("Rule:%d ", rule_id);
+
+ value = *tmp;
+ pr_err(
+ "Private_IP:%d.%d.%d.%d ",
+ ((value & 0xFF000000) >> 24),
+ ((value & 0x00FF0000) >> 16),
+ ((value & 0x0000FF00) >> 8),
+ ((value & 0x000000FF)));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Target_IP:%d.%d.%d.%d ",
+ ((value & 0xFF000000) >> 24),
+ ((value & 0x00FF0000) >> 16),
+ ((value & 0x0000FF00) >> 8),
+ ((value & 0x000000FF)));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Next_Index:%d Public_Port:%d ",
+ (value & 0x0000FFFF),
+ ((value & 0xFFFF0000) >> 16));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Private_Port:%d Target_Port:%d ",
+ (value & 0x0000FFFF),
+ ((value & 0xFFFF0000) >> 16));
+ tmp++;
+
+ value = *tmp;
+ flag = ((value & 0xFFFF0000) >> 16);
+ if (flag & NAT_ENTRY_RST_FIN_BIT) {
+ pr_err(
+ "IP_CKSM_delta:0x%x Flags:%s ",
+ (value & 0x0000FFFF),
+ "Direct_To_A5");
+ } else {
+ pr_err(
+ "IP_CKSM_delta:0x%x Flags:%s ",
+ (value & 0x0000FFFF),
+ "Fwd_to_route");
+ }
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Time_stamp:0x%x Proto:%d ",
+ (value & 0x00FFFFFF),
+ ((value & 0xFF000000) >> 24));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "Prev_Index:%d Indx_tbl_entry:%d ",
+ (value & 0x0000FFFF),
+ ((value & 0xFFFF0000) >> 16));
+ tmp++;
+
+ value = *tmp;
+ pr_err(
+ "TCP_UDP_cksum_delta:0x%x\n",
+ ((value & 0xFFFF0000) >> 16));
+ }
+
+ base_tbl += ENTRY_U32_FIELDS;
+
+ }
+ }
+ }
+
+ /* Print Index tables */
+ rule_id = 0;
+ for (j = 0; j < 2; j++) {
+ if (j == BASE_TABLE) {
+ tbl_size = ipa3_ctx->nat_mem.size_base_tables;
+ indx_tbl = (u32 *)ipa3_ctx->nat_mem.index_table_addr;
+
+ pr_err("\nIndex Table:\n");
+ } else {
+ tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1;
+ indx_tbl =
+ (u32 *)ipa3_ctx->nat_mem.index_table_expansion_addr;
+
+ pr_err("\nExpansion Index Table:\n");
+ }
+
+ if (indx_tbl != NULL) {
+ for (i = 0; i <= tbl_size; i++, rule_id++) {
+ tmp = indx_tbl;
+ value = *tmp;
+ tbl_entry = (value & 0x0000FFFF);
+
+ if (tbl_entry) {
+ pr_err("Rule:%d ", rule_id);
+
+ value = *tmp;
+ pr_err(
+ "Table_Entry:%d Next_Index:%d\n",
+ tbl_entry,
+ ((value & 0xFFFF0000) >> 16));
+ }
+
+ indx_tbl++;
+ }
+ }
+ }
+	pr_err("Current No. of NAT entries: %d\n", no_entries);
+
+ return 0;
+}
+
+static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int result, nbytes, cnt = 0;
+
+ result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
+ if (result < 0) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Error in printing RM stat %d\n", result);
+ cnt += nbytes;
+ } else
+ cnt += result;
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static void ipa_dump_status(struct ipahal_pkt_status *status)
+{
+ IPA_DUMP_STATUS_FIELD(status_opcode);
+ IPA_DUMP_STATUS_FIELD(exception);
+ IPA_DUMP_STATUS_FIELD(status_mask);
+ IPA_DUMP_STATUS_FIELD(pkt_len);
+ IPA_DUMP_STATUS_FIELD(endp_src_idx);
+ IPA_DUMP_STATUS_FIELD(endp_dest_idx);
+ IPA_DUMP_STATUS_FIELD(metadata);
+ IPA_DUMP_STATUS_FIELD(flt_local);
+ IPA_DUMP_STATUS_FIELD(flt_hash);
+ IPA_DUMP_STATUS_FIELD(flt_global);
+ IPA_DUMP_STATUS_FIELD(flt_ret_hdr);
+ IPA_DUMP_STATUS_FIELD(flt_miss);
+ IPA_DUMP_STATUS_FIELD(flt_rule_id);
+ IPA_DUMP_STATUS_FIELD(rt_local);
+ IPA_DUMP_STATUS_FIELD(rt_hash);
+ IPA_DUMP_STATUS_FIELD(ucp);
+ IPA_DUMP_STATUS_FIELD(rt_tbl_idx);
+ IPA_DUMP_STATUS_FIELD(rt_miss);
+ IPA_DUMP_STATUS_FIELD(rt_rule_id);
+ IPA_DUMP_STATUS_FIELD(nat_hit);
+ IPA_DUMP_STATUS_FIELD(nat_entry_idx);
+ IPA_DUMP_STATUS_FIELD(nat_type);
+ pr_err("tag = 0x%llx\n", (u64)status->tag_info & 0xFFFFFFFFFFFF);
+ IPA_DUMP_STATUS_FIELD(seq_num);
+ IPA_DUMP_STATUS_FIELD(time_of_day_ctr);
+ IPA_DUMP_STATUS_FIELD(hdr_local);
+ IPA_DUMP_STATUS_FIELD(hdr_offset);
+ IPA_DUMP_STATUS_FIELD(frag_hit);
+ IPA_DUMP_STATUS_FIELD(frag_rule);
+}
+
+static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ipa3_status_stats *stats;
+ int i, j;
+
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+		return -ENOMEM;
+
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (!ipa3_ctx->ep[i].sys || !ipa3_ctx->ep[i].sys->status_stat)
+ continue;
+
+ memcpy(stats, ipa3_ctx->ep[i].sys->status_stat, sizeof(*stats));
+ pr_err("Statuses for pipe %d\n", i);
+ for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) {
+ pr_err("curr=%d\n", stats->curr);
+ ipa_dump_status(&stats->status[stats->curr]);
+ pr_err("\n\n\n");
+ stats->curr = (stats->curr + 1) %
+ IPA_MAX_STATUS_STAT_NUM;
+ }
+ }
+
+ kfree(stats);
+ return 0;
+}
+
+static ssize_t ipa3_print_active_clients_log(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int cnt;
+ int table_size;
+
+ if (active_clients_buf == NULL) {
+		IPAERR("Active Clients buffer is not allocated\n");
+ return 0;
+ }
+ memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE);
+ ipa3_active_clients_lock();
+ cnt = ipa3_active_clients_log_print_buffer(active_clients_buf,
+ IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN);
+ table_size = ipa3_active_clients_log_print_table(active_clients_buf
+ + cnt, IPA_MAX_MSG_LEN);
+ ipa3_active_clients_unlock();
+
+ return simple_read_from_buffer(ubuf, count, ppos,
+ active_clients_buf, cnt + table_size);
+}
+
+static ssize_t ipa3_clear_active_clients_log(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ ipa3_active_clients_log_clear();
+
+ return count;
+}
+
+static ssize_t ipa3_enable_ipc_low(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ if (option) {
+ if (!ipa3_ctx->logbuf_low) {
+ ipa3_ctx->logbuf_low =
+ ipc_log_context_create(IPA_IPC_LOG_PAGES,
+ "ipa_low", 0);
+ }
+
+ if (ipa3_ctx->logbuf_low == NULL) {
+ IPAERR("failed to get logbuf_low\n");
+ return -EFAULT;
+ }
+ } else {
+ if (ipa3_ctx->logbuf_low)
+ ipc_log_context_destroy(ipa3_ctx->logbuf_low);
+ ipa3_ctx->logbuf_low = NULL;
+ }
+
+ return count;
+}
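+
+/*
+ * Usage note (illustrative, not part of the original patch; assumes
+ * debugfs is mounted at /sys/kernel/debug). A non-zero write allocates
+ * the low-priority IPC log context, a zero write destroys it:
+ *
+ *   echo 1 > /sys/kernel/debug/ipa/enable_low_prio_print
+ */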
+
+const struct file_operations ipa3_gen_reg_ops = {
+ .read = ipa3_read_gen_reg,
+};
+
+const struct file_operations ipa3_ep_reg_ops = {
+ .read = ipa3_read_ep_reg,
+ .write = ipa3_write_ep_reg,
+};
+
+const struct file_operations ipa3_keep_awake_ops = {
+ .read = ipa3_read_keep_awake,
+ .write = ipa3_write_keep_awake,
+};
+
+const struct file_operations ipa3_ep_holb_ops = {
+ .write = ipa3_write_ep_holb,
+};
+
+const struct file_operations ipa3_hdr_ops = {
+ .read = ipa3_read_hdr,
+};
+
+const struct file_operations ipa3_rt_ops = {
+ .read = ipa3_read_rt,
+ .open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_rt_hw_ops = {
+ .read = ipa3_read_rt_hw,
+ .open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_proc_ctx_ops = {
+ .read = ipa3_read_proc_ctx,
+};
+
+const struct file_operations ipa3_flt_ops = {
+ .read = ipa3_read_flt,
+ .open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_flt_hw_ops = {
+ .read = ipa3_read_flt_hw,
+ .open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_stats_ops = {
+ .read = ipa3_read_stats,
+};
+
+const struct file_operations ipa3_wstats_ops = {
+ .read = ipa3_read_wstats,
+};
+
+const struct file_operations ipa3_wdi_ops = {
+ .read = ipa3_read_wdi,
+};
+
+const struct file_operations ipa3_ntn_ops = {
+ .read = ipa3_read_ntn,
+};
+
+const struct file_operations ipa3_msg_ops = {
+ .read = ipa3_read_msg,
+};
+
+const struct file_operations ipa3_dbg_cnt_ops = {
+ .read = ipa3_read_dbg_cnt,
+ .write = ipa3_write_dbg_cnt,
+};
+
+const struct file_operations ipa3_status_stats_ops = {
+ .read = ipa_status_stats_read,
+};
+
+const struct file_operations ipa3_nat4_ops = {
+ .read = ipa3_read_nat4,
+};
+
+const struct file_operations ipa3_rm_stats = {
+ .read = ipa3_rm_read_stats,
+};
+
+const struct file_operations ipa3_active_clients = {
+ .read = ipa3_print_active_clients_log,
+ .write = ipa3_clear_active_clients_log,
+};
+
+const struct file_operations ipa3_ipc_low_ops = {
+ .write = ipa3_enable_ipc_low,
+};
+
+void ipa3_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP;
+ const mode_t write_only_mode = S_IWUSR | S_IWGRP;
+ struct dentry *file;
+
+ dent = debugfs_create_dir("ipa", 0);
+ if (IS_ERR(dent)) {
+ IPAERR("fail to create folder in debug_fs.\n");
+ return;
+ }
+
+ file = debugfs_create_u32("hw_type", read_only_mode,
+ dent, &ipa3_ctx->ipa_hw_type);
+ if (!file) {
+ IPAERR("could not create hw_type file\n");
+ goto fail;
+ }
+
+ dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+ &ipa3_gen_reg_ops);
+ if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+ IPAERR("fail to create file for debug_fs gen_reg\n");
+ goto fail;
+ }
+
+ dfile_active_clients = debugfs_create_file("active_clients",
+ read_write_mode, dent, 0, &ipa3_active_clients);
+ if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
+ IPAERR("fail to create file for debug_fs active_clients\n");
+ goto fail;
+ }
+
+ active_clients_buf = NULL;
+ active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENT_BUF_SIZE,
+ GFP_KERNEL);
+ if (active_clients_buf == NULL)
+		IPAERR("fail to allocate active clients memory buffer\n");
+
+ dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+ &ipa3_ep_reg_ops);
+ if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+ IPAERR("fail to create file for debug_fs ep_reg\n");
+ goto fail;
+ }
+
+ dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode,
+ dent, 0, &ipa3_keep_awake_ops);
+ if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) {
+ IPAERR("fail to create file for debug_fs dfile_keep_awake\n");
+ goto fail;
+ }
+
+ dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent,
+ 0, &ipa3_ep_holb_ops);
+ if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) {
+ IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n");
+ goto fail;
+ }
+
+ dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+ &ipa3_hdr_ops);
+ if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+ IPAERR("fail to create file for debug_fs hdr\n");
+ goto fail;
+ }
+
+ dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent,
+ 0, &ipa3_proc_ctx_ops);
+	if (!dfile_proc_ctx || IS_ERR(dfile_proc_ctx)) {
+ IPAERR("fail to create file for debug_fs proc_ctx\n");
+ goto fail;
+ }
+
+ dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa3_rt_ops);
+ if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+ IPAERR("fail to create file for debug_fs ip4 rt\n");
+ goto fail;
+ }
+
+ dfile_ip4_rt_hw = debugfs_create_file("ip4_rt_hw", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa3_rt_hw_ops);
+ if (!dfile_ip4_rt_hw || IS_ERR(dfile_ip4_rt_hw)) {
+ IPAERR("fail to create file for debug_fs ip4 rt hw\n");
+ goto fail;
+ }
+
+ dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa3_rt_ops);
+ if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+		IPAERR("fail to create file for debug_fs ip6 rt\n");
+ goto fail;
+ }
+
+ dfile_ip6_rt_hw = debugfs_create_file("ip6_rt_hw", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa3_rt_hw_ops);
+ if (!dfile_ip6_rt_hw || IS_ERR(dfile_ip6_rt_hw)) {
+ IPAERR("fail to create file for debug_fs ip6 rt hw\n");
+ goto fail;
+ }
+
+ dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa3_flt_ops);
+ if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+ IPAERR("fail to create file for debug_fs ip4 flt\n");
+ goto fail;
+ }
+
+ dfile_ip4_flt_hw = debugfs_create_file("ip4_flt_hw", read_only_mode,
+ dent, (void *)IPA_IP_v4, &ipa3_flt_hw_ops);
+ if (!dfile_ip4_flt_hw || IS_ERR(dfile_ip4_flt_hw)) {
+		IPAERR("fail to create file for debug_fs ip4 flt hw\n");
+ goto fail;
+ }
+
+ dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa3_flt_ops);
+ if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+ IPAERR("fail to create file for debug_fs ip6 flt\n");
+ goto fail;
+ }
+
+ dfile_ip6_flt_hw = debugfs_create_file("ip6_flt_hw", read_only_mode,
+ dent, (void *)IPA_IP_v6, &ipa3_flt_hw_ops);
+ if (!dfile_ip6_flt_hw || IS_ERR(dfile_ip6_flt_hw)) {
+		IPAERR("fail to create file for debug_fs ip6 flt hw\n");
+ goto fail;
+ }
+
+ dfile_stats = debugfs_create_file("stats", read_only_mode, dent, 0,
+ &ipa3_stats_ops);
+ if (!dfile_stats || IS_ERR(dfile_stats)) {
+ IPAERR("fail to create file for debug_fs stats\n");
+ goto fail;
+ }
+
+ dfile_wstats = debugfs_create_file("wstats", read_only_mode,
+ dent, 0, &ipa3_wstats_ops);
+ if (!dfile_wstats || IS_ERR(dfile_wstats)) {
+ IPAERR("fail to create file for debug_fs wstats\n");
+ goto fail;
+ }
+
+ dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, 0,
+ &ipa3_wdi_ops);
+ if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) {
+ IPAERR("fail to create file for debug_fs wdi stats\n");
+ goto fail;
+ }
+
+ dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+ &ipa3_ntn_ops);
+ if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+ IPAERR("fail to create file for debug_fs ntn stats\n");
+ goto fail;
+ }
+
+ dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
+ &ipa3_dbg_cnt_ops);
+ if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
+ IPAERR("fail to create file for debug_fs dbg_cnt\n");
+ goto fail;
+ }
+
+ dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0,
+ &ipa3_msg_ops);
+ if (!dfile_msg || IS_ERR(dfile_msg)) {
+ IPAERR("fail to create file for debug_fs msg\n");
+ goto fail;
+ }
+
+ dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent,
+ 0, &ipa3_nat4_ops);
+ if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) {
+ IPAERR("fail to create file for debug_fs ip4 nat\n");
+ goto fail;
+ }
+
+ dfile_rm_stats = debugfs_create_file("rm_stats",
+ read_only_mode, dent, 0, &ipa3_rm_stats);
+ if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) {
+ IPAERR("fail to create file for debug_fs rm_stats\n");
+ goto fail;
+ }
+
+ dfile_status_stats = debugfs_create_file("status_stats",
+ read_only_mode, dent, 0, &ipa3_status_stats_ops);
+ if (!dfile_status_stats || IS_ERR(dfile_status_stats)) {
+ IPAERR("fail to create file for debug_fs status_stats\n");
+ goto fail;
+ }
+
+ file = debugfs_create_u32("enable_clock_scaling", read_write_mode,
+ dent, &ipa3_ctx->enable_clock_scaling);
+ if (!file) {
+ IPAERR("could not create enable_clock_scaling file\n");
+ goto fail;
+ }
+
+ file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps",
+ read_write_mode, dent,
+ &ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal);
+ if (!file) {
+ IPAERR("could not create bw_threshold_nominal_mbps\n");
+ goto fail;
+ }
+
+ file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps",
+ read_write_mode, dent,
+ &ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo);
+ if (!file) {
+ IPAERR("could not create bw_threshold_turbo_mbps\n");
+ goto fail;
+ }
+
+ file = debugfs_create_file("enable_low_prio_print", write_only_mode,
+ dent, 0, &ipa3_ipc_low_ops);
+ if (!file) {
+ IPAERR("could not create enable_low_prio_print file\n");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+void ipa3_debugfs_remove(void)
+{
+ if (IS_ERR(dent)) {
+ IPAERR("ipa3_debugfs_remove: folder was not created.\n");
+ return;
+ }
+ if (active_clients_buf != NULL) {
+ kfree(active_clients_buf);
+ active_clients_buf = NULL;
+ }
+ debugfs_remove_recursive(dent);
+}
+
+struct dentry *ipa_debugfs_get_root(void)
+{
+ return dent;
+}
+EXPORT_SYMBOL(ipa_debugfs_get_root);
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa3_debugfs_init(void) {}
+void ipa3_debugfs_remove(void) {}
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
new file mode 100644
index 0000000..2a1c286
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
@@ -0,0 +1,990 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include "linux/msm_gsi.h"
+#include "ipa_i.h"
+
+#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010
+#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050
+#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8
+#define IPA_DMA_MAX_PKT_SZ 0xFFFF
+#define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \
+ sizeof(struct sps_iovec) - 1)
+#define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \
+ sizeof(struct sps_iovec) - 1)
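+/*
+ * Each in-flight copy occupies one sps_iovec slot in the descriptor FIFO;
+ * one slot is left unused (presumably so a full ring can be distinguished
+ * from an empty one), hence the "- 1" in the limits above.
+ */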
+
+#define IPADMA_DRV_NAME "ipa_dma"
+
+#define IPADMA_DBG(fmt, args...) \
+ do { \
+ pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPADMA_DBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPADMA_ERR(fmt, args...) \
+ do { \
+ pr_err(IPADMA_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPADMA_FUNC_ENTRY() \
+ IPADMA_DBG_LOW("ENTRY\n")
+
+#define IPADMA_FUNC_EXIT() \
+ IPADMA_DBG_LOW("EXIT\n")
+
+#ifdef CONFIG_DEBUG_FS
+#define IPADMA_MAX_MSG_LEN 1024
+static char dbg_buff[IPADMA_MAX_MSG_LEN];
+static void ipa3_dma_debugfs_init(void);
+static void ipa3_dma_debugfs_destroy(void);
+#else
+static void ipa3_dma_debugfs_init(void) {}
+static void ipa3_dma_debugfs_destroy(void) {}
+#endif
+
+/**
+ * struct ipa3_dma_ctx - IPADMA driver context information
+ * @is_enabled: is ipa_dma enabled?
+ * @destroy_pending: destroy ipa_dma after handling all pending memcpy
+ * @ipa_dma_xfer_wrapper_cache: cache of ipa3_dma_xfer_wrapper structs
+ * @sync_lock: lock for synchronization in sync_memcpy
+ * @async_lock: lock for synchronization in async_memcpy
+ * @enable_lock: lock for is_enabled
+ * @pending_lock: lock to synchronize is_enabled and the pending counts
+ * @done: no pending work - ipadma can be destroyed
+ * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer
+ * @ipa_dma_async_prod_hdl: handle of async memcpy producer
+ * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer
+ * @ipa_dma_async_cons_hdl: handle of async memcpy consumer
+ * @sync_memcpy_pending_cnt: number of pending sync memcpy operations
+ * @async_memcpy_pending_cnt: number of pending async memcpy operations
+ * @uc_memcpy_pending_cnt: number of pending uc memcpy operations
+ * @total_sync_memcpy: total number of sync memcpy (statistics)
+ * @total_async_memcpy: total number of async memcpy (statistics)
+ * @total_uc_memcpy: total number of uc memcpy (statistics)
+ */
+struct ipa3_dma_ctx {
+ bool is_enabled;
+ bool destroy_pending;
+ struct kmem_cache *ipa_dma_xfer_wrapper_cache;
+ struct mutex sync_lock;
+ spinlock_t async_lock;
+ struct mutex enable_lock;
+ spinlock_t pending_lock;
+ struct completion done;
+ u32 ipa_dma_sync_prod_hdl;
+ u32 ipa_dma_async_prod_hdl;
+ u32 ipa_dma_sync_cons_hdl;
+ u32 ipa_dma_async_cons_hdl;
+ atomic_t sync_memcpy_pending_cnt;
+ atomic_t async_memcpy_pending_cnt;
+ atomic_t uc_memcpy_pending_cnt;
+ atomic_t total_sync_memcpy;
+ atomic_t total_async_memcpy;
+ atomic_t total_uc_memcpy;
+};
+static struct ipa3_dma_ctx *ipa3_dma_ctx;
+
+/**
+ * ipa3_dma_init() - Initialize IPADMA.
+ *
+ * This function initializes all IPADMA internal data and connects
+ * the DMA pipe pairs:
+ * MEMCPY_DMA_SYNC_PROD  -> MEMCPY_DMA_SYNC_CONS
+ * MEMCPY_DMA_ASYNC_PROD -> MEMCPY_DMA_ASYNC_CONS
+ *
+ * Return codes: 0: success
+ *		-EFAULT: IPADMA is already initialized
+ *		-EINVAL: IPA driver is not initialized
+ *		-ENOMEM: memory allocation error
+ *		-EPERM: pipe connection failed
+ */
+int ipa3_dma_init(void)
+{
+ struct ipa3_dma_ctx *ipa_dma_ctx_t;
+ struct ipa_sys_connect_params sys_in;
+ int res = 0;
+
+ IPADMA_FUNC_ENTRY();
+
+ if (ipa3_dma_ctx) {
+ IPADMA_ERR("Already initialized.\n");
+ return -EFAULT;
+ }
+
+ if (!ipa3_is_ready()) {
+ IPADMA_ERR("IPA is not ready yet\n");
+ return -EINVAL;
+ }
+
+ ipa_dma_ctx_t = kzalloc(sizeof(*(ipa3_dma_ctx)), GFP_KERNEL);
+
+ if (!ipa_dma_ctx_t) {
+ IPADMA_ERR("kzalloc error.\n");
+ return -ENOMEM;
+ }
+
+ ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache =
+ kmem_cache_create("IPA DMA XFER WRAPPER",
+ sizeof(struct ipa3_dma_xfer_wrapper), 0, 0, NULL);
+ if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) {
+ IPAERR(":failed to create ipa dma xfer wrapper cache.\n");
+ res = -ENOMEM;
+ goto fail_mem_ctrl;
+ }
+
+ mutex_init(&ipa_dma_ctx_t->enable_lock);
+ spin_lock_init(&ipa_dma_ctx_t->async_lock);
+ mutex_init(&ipa_dma_ctx_t->sync_lock);
+ spin_lock_init(&ipa_dma_ctx_t->pending_lock);
+ init_completion(&ipa_dma_ctx_t->done);
+ ipa_dma_ctx_t->is_enabled = false;
+ ipa_dma_ctx_t->destroy_pending = false;
+ atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0);
+ atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0);
+ atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0);
+ atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0);
+ atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0);
+ atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0);
+
+ /* IPADMA SYNC PROD-source for sync memcpy */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+ sys_in.skip_ep_cfg = false;
+ if (ipa3_setup_sys_pipe(&sys_in,
+ &ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) {
+ IPADMA_ERR(":setup sync prod pipe failed\n");
+ res = -EPERM;
+ goto fail_sync_prod;
+ }
+
+ /* IPADMA SYNC CONS-destination for sync memcpy */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.skip_ep_cfg = false;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ sys_in.notify = NULL;
+ sys_in.priv = NULL;
+ if (ipa3_setup_sys_pipe(&sys_in,
+ &ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) {
+ IPADMA_ERR(":setup sync cons pipe failed.\n");
+ res = -EPERM;
+ goto fail_sync_cons;
+ }
+
+ IPADMA_DBG("SYNC MEMCPY pipes are connected\n");
+
+	/* IPADMA ASYNC PROD-source for async memcpy */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD;
+ sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+ sys_in.skip_ep_cfg = false;
+ sys_in.notify = NULL;
+ if (ipa3_setup_sys_pipe(&sys_in,
+ &ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) {
+ IPADMA_ERR(":setup async prod pipe failed.\n");
+ res = -EPERM;
+ goto fail_async_prod;
+ }
+
+	/* IPADMA ASYNC CONS-destination for async memcpy */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+ sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+ sys_in.skip_ep_cfg = false;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ sys_in.notify = ipa3_dma_async_memcpy_notify_cb;
+ sys_in.priv = NULL;
+ if (ipa3_setup_sys_pipe(&sys_in,
+ &ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) {
+ IPADMA_ERR(":setup async cons pipe failed.\n");
+ res = -EPERM;
+ goto fail_async_cons;
+ }
+ ipa3_dma_debugfs_init();
+ ipa3_dma_ctx = ipa_dma_ctx_t;
+ IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");
+
+ IPADMA_FUNC_EXIT();
+ return res;
+fail_async_cons:
+ ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
+fail_async_prod:
+ ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
+fail_sync_cons:
+ ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
+fail_sync_prod:
+ kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache);
+fail_mem_ctrl:
+ kfree(ipa_dma_ctx_t);
+ ipa3_dma_ctx = NULL;
+ return res;
+
+}
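+
+/*
+ * Illustrative client flow (sketch only, not part of the original patch;
+ * the function and buffer names below are hypothetical). A client
+ * initializes IPADMA once, votes for clocks, copies between two
+ * physically contiguous DMA-able buffers and tears everything down:
+ */
+#if 0	/* example, not compiled */
+static int ipa_dma_example(phys_addr_t dst_pa, phys_addr_t src_pa, int len)
+{
+	int rc;
+
+	rc = ipa3_dma_init();		/* connect sync + async pipe pairs */
+	if (rc)
+		return rc;
+	rc = ipa3_dma_enable();		/* vote for IPA clocks */
+	if (rc)
+		goto out_destroy;
+	rc = ipa3_dma_sync_memcpy((u64)dst_pa, (u64)src_pa, len);
+	ipa3_dma_disable();		/* unvote once no work is pending */
+out_destroy:
+	ipa3_dma_destroy();
+	return rc;
+}
+#endif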
+
+/**
+ * ipa3_dma_enable() - Vote for IPA clocks.
+ *
+ * Return codes: 0: success
+ *		-EPERM: Operation not permitted as ipa_dma isn't initialized
+ *			or is already enabled
+ */
+int ipa3_dma_enable(void)
+{
+ IPADMA_FUNC_ENTRY();
+ if (ipa3_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
+ return -EPERM;
+ }
+ mutex_lock(&ipa3_dma_ctx->enable_lock);
+ if (ipa3_dma_ctx->is_enabled) {
+ IPADMA_ERR("Already enabled.\n");
+ mutex_unlock(&ipa3_dma_ctx->enable_lock);
+ return -EPERM;
+ }
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
+ ipa3_dma_ctx->is_enabled = true;
+ mutex_unlock(&ipa3_dma_ctx->enable_lock);
+
+ IPADMA_FUNC_EXIT();
+ return 0;
+}
+
+static bool ipa3_dma_work_pending(void)
+{
+ if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt)) {
+ IPADMA_DBG("pending sync\n");
+ return true;
+ }
+ if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt)) {
+ IPADMA_DBG("pending async\n");
+ return true;
+ }
+ if (atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt)) {
+ IPADMA_DBG("pending uc\n");
+ return true;
+ }
+ IPADMA_DBG_LOW("no pending work\n");
+ return false;
+}
+
+/**
+ * ipa3_dma_disable() - Unvote for IPA clocks.
+ *
+ * Enter power save mode.
+ *
+ * Return codes: 0: success
+ *		-EPERM: Operation not permitted as ipa_dma isn't initialized
+ *			or is already disabled
+ *		-EFAULT: cannot disable ipa_dma as there is pending
+ *			memcpy work
+ */
+int ipa3_dma_disable(void)
+{
+ unsigned long flags;
+
+ IPADMA_FUNC_ENTRY();
+ if (ipa3_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
+ return -EPERM;
+ }
+ mutex_lock(&ipa3_dma_ctx->enable_lock);
+ spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+ if (!ipa3_dma_ctx->is_enabled) {
+ IPADMA_ERR("Already disabled.\n");
+ spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+ mutex_unlock(&ipa3_dma_ctx->enable_lock);
+ return -EPERM;
+ }
+ if (ipa3_dma_work_pending()) {
+ IPADMA_ERR("There is pending work, can't disable.\n");
+ spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+ mutex_unlock(&ipa3_dma_ctx->enable_lock);
+ return -EFAULT;
+ }
+ ipa3_dma_ctx->is_enabled = false;
+ spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
+ mutex_unlock(&ipa3_dma_ctx->enable_lock);
+ IPADMA_FUNC_EXIT();
+ return 0;
+}
+
+/**
+ * ipa3_dma_sync_memcpy() - Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ * -EFAULT: other
+ */
+int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+ int ep_idx;
+ int res;
+ int i = 0;
+ struct ipa3_sys_context *cons_sys;
+ struct ipa3_sys_context *prod_sys;
+ struct sps_iovec iov;
+ struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
+ struct ipa3_dma_xfer_wrapper *head_descr = NULL;
+ struct gsi_xfer_elem xfer_elem;
+ struct gsi_chan_xfer_notify gsi_notify;
+ unsigned long flags;
+ bool stop_polling = false;
+
+ IPADMA_FUNC_ENTRY();
+ IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n",
+ dest, src, len);
+ if (ipa3_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+ return -EPERM;
+ }
+ if ((max(src, dest) - min(src, dest)) < len) {
+ IPADMA_ERR("invalid addresses - overlapping buffers\n");
+ return -EINVAL;
+ }
+ if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+ IPADMA_ERR("invalid len, %d\n", len);
+ return -EINVAL;
+ }
+ if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
+ if (((u32)src != src) || ((u32)dest != dest)) {
+			IPADMA_ERR("Bad addr, only 32b addr supported for BAM\n");
+ return -EINVAL;
+ }
+ }
+ spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+ if (!ipa3_dma_ctx->is_enabled) {
+ IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+ spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+ return -EPERM;
+ }
+ atomic_inc(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+ spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
+ if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt) >=
+ IPA_DMA_MAX_PENDING_SYNC) {
+ atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+ IPADMA_ERR("Reached pending requests limit\n");
+ return -EFAULT;
+ }
+ }
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+ if (-1 == ep_idx) {
+ IPADMA_ERR("Client %u is not mapped\n",
+ IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+ return -EFAULT;
+ }
+ cons_sys = ipa3_ctx->ep[ep_idx].sys;
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+ if (-1 == ep_idx) {
+ IPADMA_ERR("Client %u is not mapped\n",
+ IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+ return -EFAULT;
+ }
+ prod_sys = ipa3_ctx->ep[ep_idx].sys;
+
+ xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+ GFP_KERNEL);
+ if (!xfer_descr) {
+ IPADMA_ERR("failed to alloc xfer descr wrapper\n");
+ res = -ENOMEM;
+ goto fail_mem_alloc;
+ }
+ xfer_descr->phys_addr_dest = dest;
+ xfer_descr->phys_addr_src = src;
+ xfer_descr->len = len;
+ init_completion(&xfer_descr->xfer_done);
+
+ mutex_lock(&ipa3_dma_ctx->sync_lock);
+ list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+ cons_sys->len++;
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ xfer_elem.addr = dest;
+ xfer_elem.len = len;
+ xfer_elem.type = GSI_XFER_ELEM_DATA;
+ xfer_elem.flags = GSI_XFER_FLAG_EOT;
+ xfer_elem.xfer_user_data = xfer_descr;
+ res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+ &xfer_elem, true);
+ if (res) {
+ IPADMA_ERR(
+ "Failed: gsi_queue_xfer dest descr res:%d\n",
+ res);
+ goto fail_send;
+ }
+ xfer_elem.addr = src;
+ xfer_elem.len = len;
+ xfer_elem.type = GSI_XFER_ELEM_DATA;
+ xfer_elem.flags = GSI_XFER_FLAG_EOT;
+ xfer_elem.xfer_user_data = NULL;
+ res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+ &xfer_elem, true);
+ if (res) {
+ IPADMA_ERR(
+ "Failed: gsi_queue_xfer src descr res:%d\n",
+ res);
+ BUG();
+ }
+ } else {
+ res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len,
+ NULL, 0);
+ if (res) {
+ IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+ goto fail_send;
+ }
+ res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+ NULL, SPS_IOVEC_FLAG_EOT);
+ if (res) {
+ IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+ BUG();
+ }
+ }
+ head_descr = list_first_entry(&cons_sys->head_desc_list,
+ struct ipa3_dma_xfer_wrapper, link);
+
+ /* in case we are not the head of the list, wait for head to wake us */
+ if (xfer_descr != head_descr) {
+ mutex_unlock(&ipa3_dma_ctx->sync_lock);
+ wait_for_completion(&xfer_descr->xfer_done);
+ mutex_lock(&ipa3_dma_ctx->sync_lock);
+ head_descr = list_first_entry(&cons_sys->head_desc_list,
+ struct ipa3_dma_xfer_wrapper, link);
+ BUG_ON(xfer_descr != head_descr);
+ }
+ mutex_unlock(&ipa3_dma_ctx->sync_lock);
+
+ do {
+ /* wait for transfer to complete */
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ res = gsi_poll_channel(cons_sys->ep->gsi_chan_hdl,
+ &gsi_notify);
+ if (res == GSI_STATUS_SUCCESS)
+ stop_polling = true;
+ else if (res != GSI_STATUS_POLL_EMPTY)
+ IPADMA_ERR(
+				"Failed: gsi_poll_channel, returned %d loop#:%d\n",
+ res, i);
+ } else {
+ res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov);
+ if (res)
+ IPADMA_ERR(
+ "Failed: get_iovec, returned %d loop#:%d\n",
+ res, i);
+ if (iov.addr != 0)
+ stop_polling = true;
+ }
+ usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX,
+ IPA_DMA_POLLING_MAX_SLEEP_RX);
+ i++;
+ } while (!stop_polling);
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ BUG_ON(len != gsi_notify.bytes_xfered);
+ BUG_ON(dest != ((struct ipa3_dma_xfer_wrapper *)
+ (gsi_notify.xfer_user_data))->phys_addr_dest);
+ } else {
+ BUG_ON(dest != iov.addr);
+ BUG_ON(len != iov.size);
+ }
+
+ mutex_lock(&ipa3_dma_ctx->sync_lock);
+ list_del(&head_descr->link);
+ cons_sys->len--;
+ kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+ /* wake the head of the list */
+ if (!list_empty(&cons_sys->head_desc_list)) {
+ head_descr = list_first_entry(&cons_sys->head_desc_list,
+ struct ipa3_dma_xfer_wrapper, link);
+ complete(&head_descr->xfer_done);
+ }
+ mutex_unlock(&ipa3_dma_ctx->sync_lock);
+
+ atomic_inc(&ipa3_dma_ctx->total_sync_memcpy);
+ atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+ if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+ complete(&ipa3_dma_ctx->done);
+
+ IPADMA_FUNC_EXIT();
+ return res;
+
+fail_send:
+ list_del(&xfer_descr->link);
+ cons_sys->len--;
+ mutex_unlock(&ipa3_dma_ctx->sync_lock);
+ kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+ atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+ if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+ complete(&ipa3_dma_ctx->done);
+ return res;
+}
+
+/**
+ * ipa3_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ * -EFAULT: descr fifo is full.
+ */
+int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
+ void (*user_cb)(void *user1), void *user_param)
+{
+ int ep_idx;
+ int res = 0;
+ struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
+ struct ipa3_sys_context *prod_sys;
+ struct ipa3_sys_context *cons_sys;
+ struct gsi_xfer_elem xfer_elem_cons, xfer_elem_prod;
+ unsigned long flags;
+
+ IPADMA_FUNC_ENTRY();
+ IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n",
+ dest, src, len);
+ if (ipa3_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+ return -EPERM;
+ }
+ if ((max(src, dest) - min(src, dest)) < len) {
+ IPADMA_ERR("invalid addresses - overlapping buffers\n");
+ return -EINVAL;
+ }
+ if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+ IPADMA_ERR("invalid len, %d\n", len);
+ return -EINVAL;
+ }
+ if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
+ if (((u32)src != src) || ((u32)dest != dest)) {
+ IPADMA_ERR(
+				"Bad addr - only 32b addr supported for BAM\n");
+ return -EINVAL;
+ }
+ }
+ if (!user_cb) {
+ IPADMA_ERR("null pointer: user_cb\n");
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+ if (!ipa3_dma_ctx->is_enabled) {
+ IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n");
+ spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+ return -EPERM;
+ }
+ atomic_inc(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+ spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
+ if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt) >=
+ IPA_DMA_MAX_PENDING_ASYNC) {
+ atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+ IPADMA_ERR("Reached pending requests limit\n");
+ return -EFAULT;
+ }
+ }
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+ if (-1 == ep_idx) {
+ IPADMA_ERR("Client %u is not mapped\n",
+ IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+ return -EFAULT;
+ }
+ cons_sys = ipa3_ctx->ep[ep_idx].sys;
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+ if (-1 == ep_idx) {
+ IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+ return -EFAULT;
+ }
+ prod_sys = ipa3_ctx->ep[ep_idx].sys;
+
+ xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+ GFP_KERNEL);
+ if (!xfer_descr) {
+		IPADMA_ERR("failed to alloc xfer descr wrapper\n");
+ res = -ENOMEM;
+ goto fail_mem_alloc;
+ }
+ xfer_descr->phys_addr_dest = dest;
+ xfer_descr->phys_addr_src = src;
+ xfer_descr->len = len;
+ xfer_descr->callback = user_cb;
+ xfer_descr->user1 = user_param;
+
+ spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
+ list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+ cons_sys->len++;
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ xfer_elem_cons.addr = dest;
+ xfer_elem_cons.len = len;
+ xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
+ xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
+ xfer_elem_cons.xfer_user_data = xfer_descr;
+ xfer_elem_prod.addr = src;
+ xfer_elem_prod.len = len;
+ xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
+ xfer_elem_prod.flags = GSI_XFER_FLAG_EOT;
+ xfer_elem_prod.xfer_user_data = NULL;
+ res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+ &xfer_elem_cons, true);
+ if (res) {
+ IPADMA_ERR(
+ "Failed: gsi_queue_xfer on dest descr res: %d\n",
+ res);
+ goto fail_send;
+ }
+ res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+ &xfer_elem_prod, true);
+ if (res) {
+ IPADMA_ERR(
+ "Failed: gsi_queue_xfer on src descr res: %d\n",
+ res);
+ BUG();
+ goto fail_send;
+ }
+ } else {
+ res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len,
+ xfer_descr, 0);
+ if (res) {
+ IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+ goto fail_send;
+ }
+ res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+ NULL, SPS_IOVEC_FLAG_EOT);
+ if (res) {
+ IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+ BUG();
+ goto fail_send;
+ }
+ }
+ spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+ IPADMA_FUNC_EXIT();
+ return res;
+
+fail_send:
+ list_del(&xfer_descr->link);
+ spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+ kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+ atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+ if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+ complete(&ipa3_dma_ctx->done);
+ return res;
+}
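+
+/*
+ * Illustrative async usage (sketch only, not part of the original patch;
+ * names are hypothetical). The caller passes a completion as the user
+ * cookie and blocks until the copy-done callback fires:
+ */
+#if 0	/* example, not compiled */
+static void example_copy_done(void *user1)
+{
+	complete((struct completion *)user1);
+}
+
+static int example_async_copy(u64 dst_pa, u64 src_pa, int len)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	int rc;
+
+	rc = ipa3_dma_async_memcpy(dst_pa, src_pa, len,
+		example_copy_done, &done);
+	if (rc)
+		return rc;
+	wait_for_completion(&done);
+	return 0;
+}
+#endif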
+
+/**
+ * ipa3_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ * initialized
+ * -EBADF: IPA uC is not loaded
+ */
+int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+ int res;
+ unsigned long flags;
+
+ IPADMA_FUNC_ENTRY();
+ if (ipa3_dma_ctx == NULL) {
+ IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+ return -EPERM;
+ }
+ if ((max(src, dest) - min(src, dest)) < len) {
+ IPADMA_ERR("invalid addresses - overlapping buffers\n");
+ return -EINVAL;
+ }
+ if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+ IPADMA_ERR("invalid len, %d\n", len);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+ if (!ipa3_dma_ctx->is_enabled) {
+ IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+ spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+ return -EPERM;
+ }
+ atomic_inc(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
+ spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+
+ res = ipa3_uc_memcpy(dest, src, len);
+ if (res) {
+ IPADMA_ERR("ipa3_uc_memcpy failed %d\n", res);
+ goto dec_and_exit;
+ }
+
+ atomic_inc(&ipa3_dma_ctx->total_uc_memcpy);
+ res = 0;
+dec_and_exit:
+ atomic_dec(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
+ if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+ complete(&ipa3_dma_ctx->done);
+ IPADMA_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * ipa3_dma_destroy() - Teardown IPADMA pipes and release ipadma.
+ *
+ * This is a blocking function; it returns only after IPADMA is destroyed.
+ */
+void ipa3_dma_destroy(void)
+{
+ int res = 0;
+
+ IPADMA_FUNC_ENTRY();
+ if (!ipa3_dma_ctx) {
+ IPADMA_ERR("IPADMA isn't initialized\n");
+ return;
+ }
+
+ if (ipa3_dma_work_pending()) {
+ ipa3_dma_ctx->destroy_pending = true;
+ IPADMA_DBG("There are pending memcpy, wait for completion\n");
+ wait_for_completion(&ipa3_dma_ctx->done);
+ }
+
+ res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl);
+ if (res)
+ IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n");
+ ipa3_dma_ctx->ipa_dma_async_cons_hdl = 0;
+ res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_cons_hdl);
+ if (res)
+ IPADMA_ERR("teardown IPADMA SYNC CONS failed\n");
+ ipa3_dma_ctx->ipa_dma_sync_cons_hdl = 0;
+ res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_prod_hdl);
+ if (res)
+ IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n");
+ ipa3_dma_ctx->ipa_dma_async_prod_hdl = 0;
+ res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_prod_hdl);
+ if (res)
+ IPADMA_ERR("teardown IPADMA SYNC PROD failed\n");
+ ipa3_dma_ctx->ipa_dma_sync_prod_hdl = 0;
+
+ ipa3_dma_debugfs_destroy();
+ kmem_cache_destroy(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache);
+ kfree(ipa3_dma_ctx);
+ ipa3_dma_ctx = NULL;
+
+ IPADMA_FUNC_EXIT();
+}
+
+/**
+ * ipa3_dma_async_memcpy_notify_cb() - Callback called by the IPA driver
+ * after a notification from the SPS driver (or poll mode) that an Rx
+ * operation completed (data was written to the dest descriptor on the
+ * async_cons ep).
+ *
+ * @priv - not in use.
+ * @evt - event name - IPA_RECEIVE.
+ * @data - the ipa_mem_buffer.
+ */
+void ipa3_dma_async_memcpy_notify_cb(void *priv,
+	enum ipa_dp_evt_type evt, unsigned long data)
+{
+ int ep_idx = 0;
+ struct ipa3_dma_xfer_wrapper *xfer_descr_expected;
+ struct ipa3_sys_context *sys;
+ unsigned long flags;
+ struct ipa_mem_buffer *mem_info;
+
+ IPADMA_FUNC_ENTRY();
+
+ mem_info = (struct ipa_mem_buffer *)data;
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+ sys = ipa3_ctx->ep[ep_idx].sys;
+
+ spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
+ xfer_descr_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa3_dma_xfer_wrapper, link);
+ list_del(&xfer_descr_expected->link);
+ sys->len--;
+ spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+ if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
+ BUG_ON(xfer_descr_expected->phys_addr_dest !=
+ mem_info->phys_base);
+ BUG_ON(xfer_descr_expected->len != mem_info->size);
+ }
+ atomic_inc(&ipa3_dma_ctx->total_async_memcpy);
+ atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+ xfer_descr_expected->callback(xfer_descr_expected->user1);
+
+ kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+ xfer_descr_expected);
+
+ if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+ complete(&ipa3_dma_ctx->done);
+
+ IPADMA_FUNC_EXIT();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_info;
+
+static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes = 0;
+
+ if (!ipa3_dma_ctx) {
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "Not initialized\n");
+ } else {
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "Status:\n IPADMA is %s\n",
+ (ipa3_dma_ctx->is_enabled) ? "Enabled" : "Disabled");
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "Statistics:\n total sync memcpy: %d\n ",
+ atomic_read(&ipa3_dma_ctx->total_sync_memcpy));
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "total async memcpy: %d\n ",
+ atomic_read(&ipa3_dma_ctx->total_async_memcpy));
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "pending sync memcpy jobs: %d\n ",
+ atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt));
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "pending async memcpy jobs: %d\n",
+ atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt));
+ nbytes += scnprintf(&dbg_buff[nbytes],
+ IPADMA_MAX_MSG_LEN - nbytes,
+ "pending uc memcpy jobs: %d\n",
+ atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt));
+ }
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_dma_debugfs_reset_statistics(struct file *file,
+ const char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned long missing;
+ s8 in_num = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &in_num))
+ return -EFAULT;
+ switch (in_num) {
+ case 0:
+ if (ipa3_dma_work_pending())
+			IPADMA_ERR("Note, there is pending memcpy work\n");
+
+ atomic_set(&ipa3_dma_ctx->total_async_memcpy, 0);
+ atomic_set(&ipa3_dma_ctx->total_sync_memcpy, 0);
+ break;
+ default:
+ IPADMA_ERR("invalid argument: To reset statistics echo 0\n");
+ break;
+ }
+ return count;
+}
+
+const struct file_operations ipa3_ipadma_stats_ops = {
+ .read = ipa3_dma_debugfs_read,
+ .write = ipa3_dma_debugfs_reset_statistics,
+};
+
+static void ipa3_dma_debugfs_init(void)
+{
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+
+ dent = debugfs_create_dir("ipa_dma", 0);
+ if (IS_ERR(dent)) {
+ IPADMA_ERR("fail to create folder ipa_dma\n");
+ return;
+ }
+
+ dfile_info =
+ debugfs_create_file("info", read_write_mode, dent,
+ 0, &ipa3_ipadma_stats_ops);
+ if (!dfile_info || IS_ERR(dfile_info)) {
+ IPADMA_ERR("fail to create file stats\n");
+ goto fail;
+ }
+ return;
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+static void ipa3_dma_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(dent);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
new file mode 100644
index 0000000..ec3334c
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -0,0 +1,4287 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/msm_gsi.h>
+#include "ipa_i.h"
+#include "ipa_trace.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_LAST_DESC_CNT 0xFFFF
+#define POLLING_INACTIVITY_RX 40
+#define POLLING_MIN_SLEEP_RX 1010
+#define POLLING_MAX_SLEEP_RX 1050
+#define POLLING_INACTIVITY_TX 40
+#define POLLING_MIN_SLEEP_TX 400
+#define POLLING_MAX_SLEEP_TX 500
+#define IPA_MTU 1500
+/* 8K less 1 nominal MTU (1500 bytes) rounded down to units of KB */
+#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
+#define IPA_GENERIC_AGGR_TIME_LIMIT 1
+#define IPA_GENERIC_AGGR_PKT_LIMIT 0
+
+#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
+#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
+ (X) + NET_SKB_PAD) +\
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
+ (IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
+#define IPA_GENERIC_RX_BUFF_LIMIT (\
+ IPA_REAL_GENERIC_RX_BUFF_SZ(\
+ IPA_GENERIC_RX_BUFF_BASE_SZ) -\
+ IPA_GENERIC_RX_BUFF_BASE_SZ)
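+/*
+ * IPA_GENERIC_RX_BUFF_SZ(X) returns the payload size to request so that,
+ * once IPA_REAL_GENERIC_RX_BUFF_SZ() adds NET_SKB_PAD and the aligned
+ * skb_shared_info tail, the actual allocation lands back at roughly X
+ * bytes: it subtracts the overhead (REAL(X) - X) from X.
+ */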
+
+/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
+
+#define IPA_RX_BUFF_CLIENT_HEADROOM 256
+
+#define IPA_WLAN_RX_POOL_SZ 100
+#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
+#define IPA_WLAN_RX_BUFF_SZ 2048
+#define IPA_WLAN_COMM_RX_POOL_LOW 100
+#define IPA_WLAN_COMM_RX_POOL_HIGH 900
+
+#define IPA_ODU_RX_BUFF_SZ 2048
+#define IPA_ODU_RX_POOL_SZ 64
+#define IPA_SIZE_DL_CSUM_META_TRAILER 8
+
+#define IPA_GSI_EVT_RING_LEN 4096
+#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
+#define IPA_GSI_EVT_RING_INT_MODT 3200 /* 0.1s under 32KHz clock */
+
+#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
+/* The below virtual channel cannot be used by any entity */
+#define IPA_GSI_CH_20_WA_VIRT_CHAN 29
+
+#define IPA_DEFAULT_SYS_YELLOW_WM 32
+
+static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
+static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_replenish_rx_work_func(struct work_struct *work);
+static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_wq_handle_rx(struct work_struct *work);
+static void ipa3_wq_handle_tx(struct work_struct *work);
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size);
+static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
+ u32 size);
+static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
+ struct ipa3_sys_context *sys);
+static void ipa3_cleanup_rx(struct ipa3_sys_context *sys);
+static void ipa3_wq_rx_avail(struct work_struct *work);
+static void ipa3_alloc_wlan_rx_common_cache(u32 size);
+static void ipa3_cleanup_wlan_rx_common_cache(void);
+static void ipa3_wq_repl_rx(struct work_struct *work);
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
+ struct ipa_mem_buffer *mem_info);
+static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
+ struct ipa3_ep_context *ep);
+static int ipa_populate_tag_field(struct ipa3_desc *desc,
+ struct ipa3_tx_pkt_wrapper *tx_pkt,
+ struct ipahal_imm_cmd_pyld **tag_pyld_ret);
+static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
+ bool process_all, bool in_poll_state);
+static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
+ bool process_all, bool in_poll_state);
+static unsigned long tag_to_pointer_wa(uint64_t tag);
+static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
+
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
+
+static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
+ struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+ struct ipa3_tx_pkt_wrapper *next_pkt;
+ int i, cnt;
+
+ if (unlikely(tx_pkt == NULL)) {
+ IPAERR("tx_pkt is NULL\n");
+ return;
+ }
+
+ cnt = tx_pkt->cnt;
+ IPADBG_LOW("cnt: %d\n", cnt);
+ for (i = 0; i < cnt; i++) {
+ spin_lock_bh(&sys->spinlock);
+ if (unlikely(list_empty(&sys->head_desc_list))) {
+ spin_unlock_bh(&sys->spinlock);
+ return;
+ }
+ next_pkt = list_next_entry(tx_pkt, link);
+ list_del(&tx_pkt->link);
+ sys->len--;
+ spin_unlock_bh(&sys->spinlock);
+ if (!tx_pkt->no_unmap_dma) {
+ if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa3_ctx->pdev,
+ tx_pkt->mem.phys_base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa3_ctx->pdev,
+ next_pkt->mem.phys_base,
+ next_pkt->mem.size,
+ DMA_TO_DEVICE);
+ }
+ }
+ if (tx_pkt->callback)
+ tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS
+ && tx_pkt->cnt > 1
+ && tx_pkt->cnt != IPA_LAST_DESC_CNT) {
+ if (tx_pkt->cnt == IPA_NUM_DESC_PER_SW_TX) {
+ dma_pool_free(ipa3_ctx->dma_pool,
+ tx_pkt->mult.base,
+ tx_pkt->mult.phys_base);
+ } else {
+ dma_unmap_single(ipa3_ctx->pdev,
+ tx_pkt->mult.phys_base,
+ tx_pkt->mult.size,
+ DMA_TO_DEVICE);
+ kfree(tx_pkt->mult.base);
+ }
+ }
+
+ kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ tx_pkt = next_pkt;
+ }
+}
+
+static void ipa3_wq_write_done_status(int src_pipe,
+ struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+ struct ipa3_sys_context *sys;
+
+ WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes);
+
+ if (!ipa3_ctx->ep[src_pipe].status.status_en)
+ return;
+
+ sys = ipa3_ctx->ep[src_pipe].sys;
+ if (!sys)
+ return;
+
+ ipa3_wq_write_done_common(sys, tx_pkt);
+}
+
+/**
+ * ipa3_wq_write_done() - this function will be (eventually) called when a Tx
+ * operation is complete
+ * @work: work_struct used by the work queue
+ *
+ * Will be called in deferred context.
+ * - invoke the callback supplied by the client who sent this command
+ * - iterate over all packets and validate that
+ * the order for sent packet is the same as expected
+ * - delete all the tx packet descriptors from the system
+ * pipe context (not needed anymore)
+ * - return the tx buffer back to dma_pool
+ */
+static void ipa3_wq_write_done(struct work_struct *work)
+{
+ struct ipa3_tx_pkt_wrapper *tx_pkt;
+ struct ipa3_sys_context *sys;
+
+ tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);
+ sys = tx_pkt->sys;
+
+ ipa3_wq_write_done_common(sys, tx_pkt);
+}
+
+static int ipa3_handle_tx_core(struct ipa3_sys_context *sys, bool process_all,
+ bool in_poll_state)
+{
+ struct sps_iovec iov;
+ struct ipa3_tx_pkt_wrapper *tx_pkt_expected;
+ int ret;
+ int cnt = 0;
+
+ while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+ !atomic_read(&sys->curr_polling_state))) {
+ if (cnt && !process_all)
+ break;
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ break;
+ }
+
+ if (iov.addr == 0)
+ break;
+
+ tx_pkt_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa3_tx_pkt_wrapper,
+ link);
+ ipa3_wq_write_done_common(sys, tx_pkt_expected);
+ cnt++;
+	}
+
+ return cnt;
+}
+
+/**
+ * ipa3_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
+ */
+static void ipa3_tx_switch_to_intr_mode(struct ipa3_sys_context *sys)
+{
+ int ret;
+
+ if (!atomic_read(&sys->curr_polling_state)) {
+ IPAERR("already in intr mode\n");
+ goto fail;
+ }
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ atomic_set(&sys->curr_polling_state, 0);
+ ipa3_dec_release_wakelock();
+ ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_CALLBACK);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to switch to intr mode.\n");
+ goto fail;
+ }
+ } else {
+ ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ goto fail;
+ }
+ sys->event.options = SPS_O_EOT;
+ ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+ if (ret) {
+ IPAERR("sps_register_event() failed %d\n", ret);
+ goto fail;
+ }
+ sys->ep->connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+ ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ goto fail;
+ }
+ atomic_set(&sys->curr_polling_state, 0);
+ ipa3_handle_tx_core(sys, true, false);
+ ipa3_dec_release_wakelock();
+ }
+ return;
+
+fail:
+ queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+ msecs_to_jiffies(1));
+}
+
+static void ipa3_handle_tx(struct ipa3_sys_context *sys)
+{
+ int inactive_cycles = 0;
+ int cnt;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
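+ /*
+ * Busy-poll the pipe; after POLLING_INACTIVITY_TX consecutive empty
+ * iterations (each separated by a short sleep) fall back to
+ * interrupt mode.
+ */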
+ do {
+ cnt = ipa3_handle_tx_core(sys, true, true);
+ if (cnt == 0) {
+ inactive_cycles++;
+ usleep_range(POLLING_MIN_SLEEP_TX,
+ POLLING_MAX_SLEEP_TX);
+ } else {
+ inactive_cycles = 0;
+ }
+ } while (inactive_cycles <= POLLING_INACTIVITY_TX);
+
+ ipa3_tx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static void ipa3_wq_handle_tx(struct work_struct *work)
+{
+ struct ipa3_sys_context *sys;
+
+ sys = container_of(work, struct ipa3_sys_context, work);
+
+ ipa3_handle_tx(sys);
+}
+
+/**
+ * ipa3_send_one() - Send a single descriptor
+ * @sys: system pipe context
+ * @desc: descriptor to send
+ * @in_atomic: whether caller is in atomic context
+ *
+ * - Allocate tx_packet wrapper
+ * - transfer data to the IPA
+ * - once the transfer is done, the transport (SPS/GSI) notifies the
+ *   sender via the supplied completion callback
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+ bool in_atomic)
+{
+ struct ipa3_tx_pkt_wrapper *tx_pkt;
+ struct gsi_xfer_elem gsi_xfer;
+ int result;
+ u16 sps_flags = SPS_IOVEC_FLAG_EOT;
+ dma_addr_t dma_address;
+ u16 len;
+ u32 mem_flag = GFP_ATOMIC;
+
+ if (unlikely(!in_atomic))
+ mem_flag = GFP_KERNEL;
+
+ tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, mem_flag);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto fail_mem_alloc;
+ }
+
+ if (!desc->dma_address_valid) {
+ dma_address = dma_map_single(ipa3_ctx->pdev, desc->pyld,
+ desc->len, DMA_TO_DEVICE);
+ } else {
+ dma_address = desc->dma_address;
+ tx_pkt->no_unmap_dma = true;
+ }
+ if (!dma_address) {
+ IPAERR("failed to DMA wrap\n");
+ goto fail_dma_map;
+ }
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+ tx_pkt->type = desc->type;
+ tx_pkt->cnt = 1; /* only 1 desc in this "set" */
+
+ tx_pkt->mem.phys_base = dma_address;
+ tx_pkt->mem.base = desc->pyld;
+ tx_pkt->mem.size = desc->len;
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc->callback;
+ tx_pkt->user1 = desc->user1;
+ tx_pkt->user2 = desc->user2;
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ memset(&gsi_xfer, 0, sizeof(gsi_xfer));
+ gsi_xfer.addr = dma_address;
+ gsi_xfer.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer.xfer_user_data = tx_pkt;
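+ /*
+ * For immediate commands the opcode is carried in the length
+ * field of the transfer element.
+ */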
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ gsi_xfer.len = desc->opcode;
+ gsi_xfer.type = GSI_XFER_ELEM_IMME_CMD;
+ } else {
+ gsi_xfer.len = desc->len;
+ gsi_xfer.type = GSI_XFER_ELEM_DATA;
+ }
+ } else {
+ /*
+ * Special treatment for immediate commands, where the
+ * structure of the descriptor is different
+ */
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ sps_flags |= SPS_IOVEC_FLAG_IMME;
+ len = desc->opcode;
+ IPADBG_LOW("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+ desc->opcode, desc->len, sps_flags);
+ IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+ } else {
+ len = desc->len;
+ }
+ }
+
+ INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
+
+ spin_lock_bh(&sys->spinlock);
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+ &gsi_xfer, true);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("GSI xfer failed.\n");
+ goto fail_transport_send;
+ }
+ } else {
+ result = sps_transfer_one(sys->ep->ep_hdl, dma_address,
+ len, tx_pkt, sps_flags);
+ if (result) {
+ IPAERR("sps_transfer_one failed rc=%d\n", result);
+ goto fail_transport_send;
+ }
+ }
+
+ spin_unlock_bh(&sys->spinlock);
+
+ return 0;
+
+fail_transport_send:
+ list_del(&tx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
+ if (!tx_pkt->no_unmap_dma)
+ dma_unmap_single(ipa3_ctx->pdev, dma_address,
+ desc->len, DMA_TO_DEVICE);
+fail_dma_map:
+ kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+ return -EFAULT;
+}
+
+/**
+ * ipa3_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send (may be immediate command or data)
+ * @in_atomic: whether caller is in atomic context
+ *
+ * This function is used for a system-to-BAM connection.
+ * - the SPS driver expects a struct sps_transfer which contains all the data
+ *   for a transaction
+ * - an ipa3_tx_pkt_wrapper is used for each IPA
+ *   descriptor (allocated from the wrappers cache)
+ * - the wrapper struct is configured for each ipa-desc payload and
+ *   contains information which is later used by the user callbacks
+ * - each transfer is made by calling sps_transfer()
+ * - each packet (command or data) that is sent is also saved in the
+ *   ipa3_sys_context so it can later be verified that all data was sent
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send(struct ipa3_sys_context *sys,
+ u32 num_desc,
+ struct ipa3_desc *desc,
+ bool in_atomic)
+{
+ struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first;
+ struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
+ struct ipa3_tx_pkt_wrapper *next_pkt;
+ struct sps_transfer transfer = { 0 };
+ struct sps_iovec *iovec;
+ struct gsi_xfer_elem *gsi_xfer_elem_array = NULL;
+ dma_addr_t dma_addr;
+ int i = 0;
+ int j;
+ int result;
+ int fail_dma_wrap = 0;
+ uint size;
+ u32 mem_flag = GFP_ATOMIC;
+ int ipa_ep_idx;
+ struct ipa_gsi_ep_config *gsi_ep_cfg;
+
+ if (unlikely(!in_atomic))
+ mem_flag = GFP_KERNEL;
+
+ size = num_desc * sizeof(struct sps_iovec);
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ ipa_ep_idx = ipa3_get_ep_mapping(sys->ep->client);
+ if (unlikely(ipa_ep_idx < 0)) {
+ IPAERR("invalid ep_index of client = %d\n",
+ sys->ep->client);
+ return -EFAULT;
+ }
+ gsi_ep_cfg = ipa3_get_gsi_ep_info(ipa_ep_idx);
+ if (unlikely(!gsi_ep_cfg)) {
+ IPAERR("failed to get gsi EP config of ep_idx=%d\n",
+ ipa_ep_idx);
+ return -EFAULT;
+ }
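+ /*
+ * The GSI EP can accept at most ipa_if_tlv chained
+ * descriptors in a single transfer.
+ */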
+ if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) {
+ IPAERR("Too many chained descriptors need=%d max=%d\n",
+ num_desc, gsi_ep_cfg->ipa_if_tlv);
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ gsi_xfer_elem_array =
+ kzalloc(num_desc * sizeof(struct gsi_xfer_elem),
+ mem_flag);
+ if (!gsi_xfer_elem_array) {
+ IPAERR("Failed to alloc mem for gsi xfer array.\n");
+ return -EFAULT;
+ }
+ } else {
+ if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+ transfer.iovec = dma_pool_alloc(ipa3_ctx->dma_pool,
+ mem_flag, &dma_addr);
+ if (!transfer.iovec) {
+ IPAERR("fail to alloc dma mem\n");
+ return -EFAULT;
+ }
+ } else {
+ transfer.iovec = kmalloc(size, mem_flag);
+ if (!transfer.iovec) {
+ IPAERR("fail to alloc mem for sps xfr buff ");
+ IPAERR("num_desc = %d size = %d\n",
+ num_desc, size);
+ return -EFAULT;
+ }
+ dma_addr = dma_map_single(ipa3_ctx->pdev,
+ transfer.iovec, size, DMA_TO_DEVICE);
+ if (!dma_addr) {
+ IPAERR("dma_map_single failed\n");
+ kfree(transfer.iovec);
+ return -EFAULT;
+ }
+ }
+ transfer.iovec_phys = dma_addr;
+ transfer.iovec_count = num_desc;
+ }
+
+ spin_lock_bh(&sys->spinlock);
+
+ for (i = 0; i < num_desc; i++) {
+ fail_dma_wrap = 0;
+ tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
+ mem_flag);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto failure;
+ }
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+
+ if (i == 0) {
+ tx_pkt_first = tx_pkt;
+ tx_pkt->cnt = num_desc;
+ INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
+ }
+
+ /* populate tag field */
+ if (desc[i].opcode ==
+ ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS)) {
+ if (ipa_populate_tag_field(&desc[i], tx_pkt,
+ &tag_pyld_ret)) {
+ IPAERR("Failed to populate tag field\n");
+ goto failure;
+ }
+ }
+
+ tx_pkt->type = desc[i].type;
+
+ if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
+ tx_pkt->mem.base = desc[i].pyld;
+ tx_pkt->mem.size = desc[i].len;
+
+ if (!desc[i].dma_address_valid) {
+ tx_pkt->mem.phys_base =
+ dma_map_single(ipa3_ctx->pdev,
+ tx_pkt->mem.base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ if (!tx_pkt->mem.phys_base) {
+ IPAERR("failed to do dma map.\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+ } else {
+ tx_pkt->mem.phys_base =
+ desc[i].dma_address;
+ tx_pkt->no_unmap_dma = true;
+ }
+ } else {
+ tx_pkt->mem.base = desc[i].frag;
+ tx_pkt->mem.size = desc[i].len;
+
+ if (!desc[i].dma_address_valid) {
+ tx_pkt->mem.phys_base =
+ skb_frag_dma_map(ipa3_ctx->pdev,
+ desc[i].frag,
+ 0, tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ if (!tx_pkt->mem.phys_base) {
+ IPAERR("dma map failed\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+ } else {
+ tx_pkt->mem.phys_base =
+ desc[i].dma_address;
+ tx_pkt->no_unmap_dma = true;
+ }
+ }
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc[i].callback;
+ tx_pkt->user1 = desc[i].user1;
+ tx_pkt->user2 = desc[i].user2;
+
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ gsi_xfer_elem_array[i].addr = tx_pkt->mem.phys_base;
+
+ /*
+ * Special treatment for immediate commands, where
+ * the structure of the descriptor is different
+ */
+ if (desc[i].type == IPA_IMM_CMD_DESC) {
+ gsi_xfer_elem_array[i].len = desc[i].opcode;
+ gsi_xfer_elem_array[i].type =
+ GSI_XFER_ELEM_IMME_CMD;
+ } else {
+ gsi_xfer_elem_array[i].len = desc[i].len;
+ gsi_xfer_elem_array[i].type =
+ GSI_XFER_ELEM_DATA;
+ }
+
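+ /*
+ * Only the last element of the chain raises EOT and carries
+ * the first wrapper as the completion cookie; all preceding
+ * elements are flagged as chained.
+ */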
+ if (i == (num_desc - 1)) {
+ gsi_xfer_elem_array[i].flags |=
+ GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_array[i].xfer_user_data =
+ tx_pkt_first;
+ /* "mark" the last desc */
+ tx_pkt->cnt = IPA_LAST_DESC_CNT;
+ } else
+ gsi_xfer_elem_array[i].flags |=
+ GSI_XFER_FLAG_CHAIN;
+ } else {
+ /*
+ * first desc of set is "special" as it
+ * holds the count and other info
+ */
+ if (i == 0) {
+ transfer.user = tx_pkt;
+ tx_pkt->mult.phys_base = dma_addr;
+ tx_pkt->mult.base = transfer.iovec;
+ tx_pkt->mult.size = size;
+ }
+
+ iovec = &transfer.iovec[i];
+ iovec->flags = 0;
+ /* point the iovec to the buffer */
+ iovec->addr = tx_pkt->mem.phys_base;
+ /*
+ * Special treatment for immediate commands, where
+ * the structure of the descriptor is different
+ */
+ if (desc[i].type == IPA_IMM_CMD_DESC) {
+ iovec->size = desc[i].opcode;
+ iovec->flags |= SPS_IOVEC_FLAG_IMME;
+ IPA_DUMP_BUFF(desc[i].pyld,
+ tx_pkt->mem.phys_base, desc[i].len);
+ } else {
+ iovec->size = desc[i].len;
+ }
+
+ if (i == (num_desc - 1)) {
+ iovec->flags |= SPS_IOVEC_FLAG_EOT;
+ /* "mark" the last desc */
+ tx_pkt->cnt = IPA_LAST_DESC_CNT;
+ }
+ }
+ }
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
+ gsi_xfer_elem_array, true);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("GSI xfer failed.\n");
+ goto failure;
+ }
+ kfree(gsi_xfer_elem_array);
+ } else {
+ result = sps_transfer(sys->ep->ep_hdl, &transfer);
+ if (result) {
+ IPAERR("sps_transfer failed rc=%d\n", result);
+ goto failure;
+ }
+ }
+
+ spin_unlock_bh(&sys->spinlock);
+ return 0;
+
+failure:
+ ipahal_destroy_imm_cmd(tag_pyld_ret);
+ tx_pkt = tx_pkt_first;
+ for (j = 0; j < i; j++) {
+ next_pkt = list_next_entry(tx_pkt, link);
+ list_del(&tx_pkt->link);
+ if (!tx_pkt->no_unmap_dma) {
+ if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa3_ctx->pdev,
+ tx_pkt->mem.phys_base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa3_ctx->pdev,
+ tx_pkt->mem.phys_base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ }
+ }
+ kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ tx_pkt = next_pkt;
+ }
+ if (j < num_desc) {
+ /* the last desc failed before it was queued */
+ if (fail_dma_wrap)
+ kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ }
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ kfree(gsi_xfer_elem_array);
+ } else {
+ if (transfer.iovec_phys) {
+ if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+ dma_pool_free(ipa3_ctx->dma_pool,
+ transfer.iovec, transfer.iovec_phys);
+ } else {
+ dma_unmap_single(ipa3_ctx->pdev,
+ transfer.iovec_phys, size,
+ DMA_TO_DEVICE);
+ kfree(transfer.iovec);
+ }
+ }
+ }
+ spin_unlock_bh(&sys->spinlock);
+ return -EFAULT;
+}
+
+/**
+ * ipa3_transport_irq_cmd_ack() - callback function which will be called by
+ * the SPS/GSI driver after an immediate command is complete.
+ * @user1: pointer to the descriptor of the transfer
+ * @user2: not used
+ *
+ * Completes the immediate command's completion object; this releases the
+ * thread waiting on this completion object (ipa3_send_cmd())
+ */
+static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
+{
+ struct ipa3_desc *desc = (struct ipa3_desc *)user1;
+
+ if (!desc) {
+ IPAERR("desc is NULL\n");
+ WARN_ON(1);
+ return;
+ }
+ IPADBG_LOW("got ack for cmd=%d\n", desc->opcode);
+ complete(&desc->xfer_done);
+}
+
+/**
+ * ipa3_send_cmd - send immediate commands
+ * @num_desc: number of descriptors within the desc struct
+ * @descr: descriptor structure
+ *
+ * Function will block till command gets ACK from IPA HW, caller needs
+ * to free any resources it allocated after function returns
+ * The callback in ipa3_desc should not be set by the caller
+ * for this function.
+ */
+int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
+{
+ struct ipa3_desc *desc;
+ int i, result = 0;
+ struct ipa3_sys_context *sys;
+ int ep_idx;
+
+ for (i = 0; i < num_desc; i++)
+ IPADBG("sending imm cmd %d\n", descr[i].opcode);
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+ if (-1 == ep_idx) {
+ IPAERR("Client %u is not mapped\n",
+ IPA_CLIENT_APPS_CMD_PROD);
+ return -EFAULT;
+ }
+ sys = ipa3_ctx->ep[ep_idx].sys;
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ if (num_desc == 1) {
+ init_completion(&descr->xfer_done);
+
+ if (descr->callback || descr->user1)
+ WARN_ON(1);
+
+ descr->callback = ipa3_transport_irq_cmd_ack;
+ descr->user1 = descr;
+ if (ipa3_send_one(sys, descr, true)) {
+ IPAERR("fail to send immediate command\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ wait_for_completion(&descr->xfer_done);
+ } else {
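+ /*
+ * Descriptors complete in order, so waiting on the last one
+ * guarantees the whole set has been consumed.
+ */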
+ desc = &descr[num_desc - 1];
+ init_completion(&desc->xfer_done);
+
+ if (desc->callback || desc->user1)
+ WARN_ON(1);
+
+ desc->callback = ipa3_transport_irq_cmd_ack;
+ desc->user1 = desc;
+ if (ipa3_send(sys, num_desc, descr, true)) {
+ IPAERR("fail to send multiple immediate command set\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ wait_for_completion(&desc->xfer_done);
+ }
+
+bail:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
+
+/**
+ * ipa3_sps_irq_tx_notify() - Callback function which will be called by
+ * the SPS driver to start a Tx poll operation.
+ * Called in an interrupt context.
+ * @notify: SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ */
+static void ipa3_sps_irq_tx_notify(struct sps_event_notify *notify)
+{
+ struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
+ int ret;
+
+ IPADBG_LOW("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+ if (!atomic_read(&sys->curr_polling_state)) {
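+ /*
+ * Move the pipe to poll mode so no further EOT interrupts
+ * fire; the workqueue drains completions from here on.
+ */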
+ ret = sps_get_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
+ }
+ ipa3_inc_acquire_wakelock();
+ atomic_set(&sys->curr_polling_state, 1);
+ queue_work(sys->wq, &sys->work);
+ }
+ break;
+ default:
+ IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
+
+/**
+ * ipa3_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Tx operation is complete.
+ * Called in an interrupt context.
+ * @notify: SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ * The event will later be handled by ipa3_wq_write_done().
+ */
+static void ipa3_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
+{
+ struct ipa3_tx_pkt_wrapper *tx_pkt;
+
+ IPADBG_LOW("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ tx_pkt = notify->data.transfer.user;
+ if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+ queue_work(tx_pkt->sys->wq, &tx_pkt->work);
+ break;
+ default:
+ IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
+
+/**
+ * ipa3_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ * - Disconnect the packet from the system pipe linked list
+ * - Unmap the packet's skb, making it non-DMAable
+ * - Free the packet from the cache
+ * - Prepare a proper skb
+ * - Call the endpoint's notify function, passing the skb in the parameters
+ * - Replenish the rx cache
+ */
+static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
+ bool in_poll_state)
+{
+ int cnt;
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+ cnt = ipa_handle_rx_core_gsi(sys, process_all, in_poll_state);
+ else
+ cnt = ipa_handle_rx_core_sps(sys, process_all, in_poll_state);
+
+ return cnt;
+}
+
+/**
+ * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
+{
+ int ret;
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ if (!atomic_read(&sys->curr_polling_state)) {
+ IPAERR("already in intr mode\n");
+ goto fail;
+ }
+ atomic_set(&sys->curr_polling_state, 0);
+ ipa3_dec_release_wakelock();
+ ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_CALLBACK);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to switch to intr mode.\n");
+ goto fail;
+ }
+ } else {
+ ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ goto fail;
+ }
+ if (!atomic_read(&sys->curr_polling_state) &&
+ ((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
+ IPADBG("already in intr mode\n");
+ return;
+ }
+ if (!atomic_read(&sys->curr_polling_state)) {
+ IPAERR("already in intr mode\n");
+ goto fail;
+ }
+ sys->event.options = SPS_O_EOT;
+ ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+ if (ret) {
+ IPAERR("sps_register_event() failed %d\n", ret);
+ goto fail;
+ }
+ sys->ep->connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+ ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ goto fail;
+ }
+ atomic_set(&sys->curr_polling_state, 0);
+ ipa3_handle_rx_core(sys, true, false);
+ ipa3_dec_release_wakelock();
+ }
+ return;
+
+fail:
+ queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+ msecs_to_jiffies(1));
+}
+
+/**
+ * ipa3_sps_irq_rx_notify() - Callback function which is called by the SPS
+ * driver when a packet is received
+ * @notify: SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deferred using a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This prevents the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa3_sps_irq_rx_notify(struct sps_event_notify *notify)
+{
+ struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
+ int ret;
+
+ IPADBG_LOW("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+ if (!atomic_read(&sys->curr_polling_state)) {
+ sys->ep->eot_in_poll_err++;
+ break;
+ }
+
+ ret = sps_get_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
+ }
+ ipa3_inc_acquire_wakelock();
+ atomic_set(&sys->curr_polling_state, 1);
+ trace_intr_to_poll3(sys->ep->client);
+ queue_work(sys->wq, &sys->work);
+ break;
+ default:
+ IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
+
+/**
+ * ipa3_switch_to_intr_tx_work_func() - Wrapper function to move from polling
+ * to interrupt mode
+ * @work: work struct
+ */
+void ipa3_switch_to_intr_tx_work_func(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct ipa3_sys_context *sys;
+
+ dwork = container_of(work, struct delayed_work, work);
+ sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
+ ipa3_handle_tx(sys);
+}
+
+/**
+ * ipa3_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @work: work struct needed by the work queue
+ *
+ * ipa3_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+static void ipa3_handle_rx(struct ipa3_sys_context *sys)
+{
+ int inactive_cycles = 0;
+ int cnt;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ do {
+ cnt = ipa3_handle_rx_core(sys, true, true);
+ if (cnt == 0) {
+ inactive_cycles++;
+ trace_idle_sleep_enter3(sys->ep->client);
+ usleep_range(POLLING_MIN_SLEEP_RX,
+ POLLING_MAX_SLEEP_RX);
+ trace_idle_sleep_exit3(sys->ep->client);
+ } else {
+ inactive_cycles = 0;
+ }
+ } while (inactive_cycles <= POLLING_INACTIVITY_RX);
+
+ trace_poll_to_intr3(sys->ep->client);
+ ipa3_rx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct ipa3_sys_context *sys;
+
+ dwork = container_of(work, struct delayed_work, work);
+ sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
+
+ if (sys->ep->napi_enabled) {
+ if (sys->ep->switch_to_intr) {
+ ipa3_rx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+ sys->ep->switch_to_intr = false;
+ sys->ep->inactive_cycles = 0;
+ } else
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa3_handle_rx(sys);
+}
+
+/**
+ * ipa3_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in: [in] input needed to setup BAM pipe and configure EP
+ * @clnt_hdl: [out] client handle
+ *
+ * - configure the end-point registers with the supplied
+ * parameters from the user.
+ * - call SPS APIs to create a system-to-bam connection with IPA.
+ * - allocate descriptor FIFO
+ * - register a callback function (ipa3_sps_irq_rx_notify or
+ *   ipa3_sps_irq_tx_notify, depending on client type) in case the driver is
+ *   not configured for polling mode
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+ int ipa_ep_idx;
+ int result = -EINVAL;
+ dma_addr_t dma_addr;
+ char buff[IPA_RESOURCE_NAME_MAX];
+ struct iommu_domain *smmu_domain;
+
+ if (sys_in == NULL || clnt_hdl == NULL) {
+ IPAERR("NULL args\n");
+ goto fail_gen;
+ }
+
+ if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+ IPAERR("bad parm client:%d fifo_sz:%d\n",
+ sys_in->client, sys_in->desc_fifo_sz);
+ goto fail_gen;
+ }
+
+ ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ goto fail_gen;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+ if (ep->valid == 1) {
+ if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+ IPAERR("EP already allocated.\n");
+ goto fail_and_disable_clocks;
+ } else {
+ if (ipa3_cfg_ep_hdr(ipa_ep_idx,
+ &sys_in->ipa_ep_cfg.hdr)) {
+ IPAERR("fail to configure hdr prop of EP.\n");
+ result = -EFAULT;
+ goto fail_and_disable_clocks;
+ }
+ if (ipa3_cfg_ep_cfg(ipa_ep_idx,
+ &sys_in->ipa_ep_cfg.cfg)) {
+ IPAERR("fail to configure cfg prop of EP.\n");
+ result = -EFAULT;
+ goto fail_and_disable_clocks;
+ }
+ IPADBG("client %d (ep: %d) overlay ok sys=%p\n",
+ sys_in->client, ipa_ep_idx, ep->sys);
+ ep->client_notify = sys_in->notify;
+ ep->priv = sys_in->priv;
+ *clnt_hdl = ipa_ep_idx;
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+ return 0;
+ }
+ }
+
+ memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+ if (!ep->sys) {
+ ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
+ if (!ep->sys) {
+ IPAERR("failed to sys ctx for client %d\n",
+ sys_in->client);
+ result = -ENOMEM;
+ goto fail_and_disable_clocks;
+ }
+
+ ep->sys->ep = ep;
+ snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
+ sys_in->client);
+ ep->sys->wq = alloc_workqueue(buff,
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ if (!ep->sys->wq) {
+ IPAERR("failed to create wq for client %d\n",
+ sys_in->client);
+ result = -EFAULT;
+ goto fail_wq;
+ }
+
+ snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
+ sys_in->client);
+ ep->sys->repl_wq = alloc_workqueue(buff,
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ if (!ep->sys->repl_wq) {
+ IPAERR("failed to create rep wq for client %d\n",
+ sys_in->client);
+ result = -EFAULT;
+ goto fail_wq2;
+ }
+
+ INIT_LIST_HEAD(&ep->sys->head_desc_list);
+ INIT_LIST_HEAD(&ep->sys->rcycl_list);
+ spin_lock_init(&ep->sys->spinlock);
+ } else {
+ memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
+ }
+
+ ep->skip_ep_cfg = sys_in->skip_ep_cfg;
+ if (ipa3_assign_policy(sys_in, ep->sys)) {
+ IPAERR("failed to sys ctx for client %d\n", sys_in->client);
+ result = -ENOMEM;
+ goto fail_gen2;
+ }
+
+ ep->valid = 1;
+ ep->client = sys_in->client;
+ ep->client_notify = sys_in->notify;
+ ep->napi_enabled = sys_in->napi_enabled;
+ ep->priv = sys_in->priv;
+ ep->keep_ipa_awake = sys_in->keep_ipa_awake;
+ atomic_set(&ep->avail_fifo_desc,
+ ((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1));
+
+ if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
+ ep->sys->status_stat == NULL) {
+ ep->sys->status_stat =
+ kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL);
+ if (!ep->sys->status_stat) {
+ IPAERR("no memory\n");
+ goto fail_gen2;
+ }
+ }
+
+ result = ipa3_enable_data_path(ipa_ep_idx);
+ if (result) {
+ IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx);
+ goto fail_gen2;
+ }
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto fail_gen2;
+ }
+ if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+ IPAERR("fail to configure status of EP.\n");
+ goto fail_gen2;
+ }
+ IPADBG("ep configuration successful\n");
+ } else {
+ IPADBG("skipping ep configuration\n");
+ }
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ result = ipa_gsi_setup_channel(sys_in, ep);
+ if (result) {
+ IPAERR("Failed to setup GSI channel\n");
+ goto fail_gen2;
+ }
+ } else {
+ /* Default Config */
+ ep->ep_hdl = sps_alloc_endpoint();
+ if (ep->ep_hdl == NULL) {
+ IPAERR("SPS EP allocation failed.\n");
+ goto fail_gen2;
+ }
+
+ result = sps_get_config(ep->ep_hdl, &ep->connect);
+ if (result) {
+ IPAERR("fail to get config.\n");
+ goto fail_sps_cfg;
+ }
+
+ /* Specific Config */
+ if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+ ep->connect.mode = SPS_MODE_SRC;
+ ep->connect.destination = SPS_DEV_HANDLE_MEM;
+ ep->connect.source = ipa3_ctx->bam_handle;
+ ep->connect.dest_pipe_index = ipa3_ctx->a5_pipe_index++;
+ ep->connect.src_pipe_index = ipa_ep_idx;
+ } else {
+ ep->connect.mode = SPS_MODE_DEST;
+ ep->connect.source = SPS_DEV_HANDLE_MEM;
+ ep->connect.destination = ipa3_ctx->bam_handle;
+ ep->connect.src_pipe_index = ipa3_ctx->a5_pipe_index++;
+ ep->connect.dest_pipe_index = ipa_ep_idx;
+ }
+
+ IPADBG("client:%d ep:%d",
+ sys_in->client, ipa_ep_idx);
+
+ IPADBG("dest_pipe_index:%d src_pipe_index:%d\n",
+ ep->connect.dest_pipe_index,
+ ep->connect.src_pipe_index);
+
+ ep->connect.options = ep->sys->sps_option;
+ ep->connect.desc.size = sys_in->desc_fifo_sz;
+ ep->connect.desc.base = dma_alloc_coherent(ipa3_ctx->pdev,
+ ep->connect.desc.size, &dma_addr, 0);
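+ /*
+ * With SMMU S1 bypass the handle returned by
+ * dma_alloc_coherent() is already a physical address;
+ * otherwise it is an IOVA that must be translated through
+ * the SMMU domain for SPS.
+ */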
+ if (ipa3_ctx->smmu_s1_bypass) {
+ ep->connect.desc.phys_base = dma_addr;
+ } else {
+ ep->connect.desc.iova = dma_addr;
+ smmu_domain = ipa3_get_smmu_domain();
+ if (smmu_domain != NULL) {
+ ep->connect.desc.phys_base =
+ iommu_iova_to_phys(smmu_domain,
+ dma_addr);
+ }
+ }
+ if (ep->connect.desc.base == NULL) {
+ IPAERR("fail to get DMA desc memory.\n");
+ goto fail_sps_cfg;
+ }
+
+ ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+ result = ipa3_sps_connect_safe(ep->ep_hdl,
+ &ep->connect, sys_in->client);
+ if (result) {
+ IPAERR("sps_connect fails.\n");
+ goto fail_sps_connect;
+ }
+
+ ep->sys->event.options = SPS_O_EOT;
+ ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
+ ep->sys->event.xfer_done = NULL;
+ ep->sys->event.user = ep->sys;
+ ep->sys->event.callback = ep->sys->sps_callback;
+ result = sps_register_event(ep->ep_hdl, &ep->sys->event);
+ if (result < 0) {
+ IPAERR("register event error %d\n", result);
+ goto fail_register_event;
+ }
+ } /* end of sps config */
+
+ *clnt_hdl = ipa_ep_idx;
+
+ if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
+ ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
+ ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
+ sizeof(void *), GFP_KERNEL);
+ if (!ep->sys->repl.cache) {
+ IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
+ ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
+ ep->sys->repl.capacity = 0;
+ } else {
+ atomic_set(&ep->sys->repl.head_idx, 0);
+ atomic_set(&ep->sys->repl.tail_idx, 0);
+ ipa3_wq_repl_rx(&ep->sys->repl_work);
+ }
+ }
+
+ if (IPA_CLIENT_IS_CONS(sys_in->client))
+ ipa3_replenish_rx_cache(ep->sys);
+
+ if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
+ ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
+ atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
+ }
+
+ ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
+ if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
+ sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+ IPADBG("modem cfg emb pipe flt\n");
+ else
+ ipa3_install_dflt_flt_rules(ipa_ep_idx);
+ }
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+ IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+ ipa_ep_idx, ep->sys);
+
+ return 0;
+
+fail_register_event:
+ sps_disconnect(ep->ep_hdl);
+fail_sps_connect:
+ dma_free_coherent(ipa3_ctx->pdev, ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+fail_sps_cfg:
+ sps_free_endpoint(ep->ep_hdl);
+fail_gen2:
+ destroy_workqueue(ep->sys->repl_wq);
+fail_wq2:
+ destroy_workqueue(ep->sys->wq);
+fail_wq:
+ kfree(ep->sys);
+ memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+fail_and_disable_clocks:
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+ return result;
+}
+
+/**
+ * ipa3_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl: [in] the handle obtained from ipa3_setup_sys_pipe
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_teardown_sys_pipe(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+ int empty;
+ int result;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipa3_disable_data_path(clnt_hdl);
+ if (ep->napi_enabled) {
+ ep->switch_to_intr = true;
+ do {
+ usleep_range(95, 105);
+ } while (atomic_read(&ep->sys->curr_polling_state));
+ }
+
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ do {
+ spin_lock_bh(&ep->sys->spinlock);
+ empty = list_empty(&ep->sys->head_desc_list);
+ spin_unlock_bh(&ep->sys->spinlock);
+ if (!empty)
+ usleep_range(95, 105);
+ else
+ break;
+ } while (1);
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+ flush_workqueue(ep->sys->wq);
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
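+ /*
+ * GSI teardown order: stop the channel, reset it, free the
+ * transfer ring, then deallocate the channel handle (and the
+ * event ring, when one was allocated).
+ */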
+ result = ipa3_stop_gsi_channel(clnt_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("GSI stop chan err: %d.\n", result);
+ BUG();
+ return result;
+ }
+ result = gsi_reset_channel(ep->gsi_chan_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to reset chan: %d.\n", result);
+ BUG();
+ return result;
+ }
+ dma_free_coherent(ipa3_ctx->pdev,
+ ep->gsi_mem_info.chan_ring_len,
+ ep->gsi_mem_info.chan_ring_base_vaddr,
+ ep->gsi_mem_info.chan_ring_base_addr);
+ result = gsi_dealloc_channel(ep->gsi_chan_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to dealloc chan: %d.\n", result);
+ BUG();
+ return result;
+ }
+
+ /* free event ring only when it is present */
+ if (ep->gsi_evt_ring_hdl != ~0) {
+ result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to reset evt ring: %d.\n",
+ result);
+ BUG();
+ return result;
+ }
+ dma_free_coherent(ipa3_ctx->pdev,
+ ep->gsi_mem_info.evt_ring_len,
+ ep->gsi_mem_info.evt_ring_base_vaddr,
+ ep->gsi_mem_info.evt_ring_base_addr);
+ result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to dealloc evt ring: %d.\n",
+ result);
+ BUG();
+ return result;
+ }
+ }
+ } else {
+ sps_disconnect(ep->ep_hdl);
+ dma_free_coherent(ipa3_ctx->pdev, ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ sps_free_endpoint(ep->ep_hdl);
+ }
+ if (ep->sys->repl_wq)
+ flush_workqueue(ep->sys->repl_wq);
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ ipa3_cleanup_rx(ep->sys);
+
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
+ if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
+ ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+ IPADBG("modem cfg emb pipe flt\n");
+ else
+ ipa3_delete_dflt_flt_rules(clnt_hdl);
+ }
+
+ if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
+ atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt);
+
+ memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats));
+
+ if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt))
+ ipa3_cleanup_wlan_rx_common_cache();
+
+ ep->valid = 0;
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+ return 0;
+}
+
+/**
+ * ipa3_tx_comp_usr_notify_release() - Callback function which will call the
+ * user supplied callback function to release the skb, or release it on
+ * its own if no callback function was supplied.
+ * @user1: pointer to the skb to release
+ * @user2: IPA endpoint index of the destination client
+ *
+ * This notification callback is for the destination client.
+ * This function is supplied in ipa3_connect.
+ */
+static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
+{
+ struct sk_buff *skb = (struct sk_buff *)user1;
+ int ep_idx = user2;
+
+ IPADBG_LOW("skb=%p ep=%d\n", skb, ep_idx);
+
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl);
+
+ if (ipa3_ctx->ep[ep_idx].client_notify)
+ ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
+ IPA_WRITE_DONE, (unsigned long)skb);
+ else
+ dev_kfree_skb_any(skb);
+}
+
+static void ipa3_tx_cmd_comp(void *user1, int user2)
+{
+ ipahal_destroy_imm_cmd(user1);
+}
+
+/**
+ * ipa3_tx_dp() - Data-path tx handler
+ * @dst: [in] which IPA destination to route tx packets to
+ * @skb: [in] the packet to send
+ * @metadata: [in] TX packet meta-data
+ *
+ * Data-path tx handler. This is used both for the SW data-path, which bypasses
+ * most IPA HW blocks, AND for the regular HW data-path (WLAN AMPDU traffic
+ * only). If dst is a "valid" CONS type, the SW data-path is used. If dst is
+ * the WLAN_AMPDU PROD type, the HW data-path for WLAN AMPDU is used. Anything
+ * else is an error. On error, the client needs to free the skb as needed. On
+ * success, the IPA driver will later invoke the client callback if one was
+ * supplied; that callback should free the skb. If no callback was supplied,
+ * the IPA driver will free the skb internally.
+ *
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send is done, from the transport's point of view, the IPA driver
+ * will be notified by the supplied completion callback.
+ *
+ * That completion callback will in turn call the user-supplied
+ * callback (from ipa3_connect)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *meta)
+{
+ struct ipa3_desc *desc;
+ struct ipa3_desc _desc[3];
+ int dst_ep_idx;
+ struct ipahal_imm_cmd_ip_packet_init cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+ struct ipa3_sys_context *sys;
+ int src_ep_idx;
+ int num_frags, f;
+
+ if (unlikely(!ipa3_ctx)) {
+ IPAERR("IPA3 driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (skb->len == 0) {
+ IPAERR("packet size is 0\n");
+ return -EINVAL;
+ }
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ if (num_frags) {
+ /* 1 desc for tag to resolve status out-of-order issue;
+ * 1 desc is needed for the linear portion of skb;
+ * 1 desc may be needed for the PACKET_INIT;
+ * 1 desc for each frag
+ */
+ desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
+ if (!desc) {
+ IPAERR("failed to alloc desc array\n");
+ goto fail_mem;
+ }
+ } else {
+ memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
+ desc = &_desc[0];
+ }
+
+ /*
+ * USB_CONS: PKT_INIT ep_idx = dst pipe
+ * Q6_CONS: PKT_INIT ep_idx = sender pipe
+ * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
+ *
+ * LAN TX: all PKT_INIT
+ * WAN TX: PKT_INIT (cmd) + HW (data)
+ *
+ */
+ if (IPA_CLIENT_IS_CONS(dst)) {
+ src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+ if (-1 == src_ep_idx) {
+ IPAERR("Client %u is not mapped\n",
+ IPA_CLIENT_APPS_LAN_WAN_PROD);
+ goto fail_gen;
+ }
+ dst_ep_idx = ipa3_get_ep_mapping(dst);
+ } else {
+ src_ep_idx = ipa3_get_ep_mapping(dst);
+ if (-1 == src_ep_idx) {
+ IPAERR("Client %u is not mapped\n", dst);
+ goto fail_gen;
+ }
+ if (meta && meta->pkt_init_dst_ep_valid)
+ dst_ep_idx = meta->pkt_init_dst_ep;
+ else
+ dst_ep_idx = -1;
+ }
+
+ sys = ipa3_ctx->ep[src_ep_idx].sys;
+
+ if (!sys->ep->valid) {
+ IPAERR("pipe not valid\n");
+ goto fail_gen;
+ }
+
+ if (dst_ep_idx != -1) {
+ /* SW data path */
+ cmd.destination_pipe_index = dst_ep_idx;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_PACKET_INIT, &cmd, true);
+ if (unlikely(!cmd_pyld)) {
+ IPAERR("failed to construct ip_packet_init imm cmd\n");
+ goto fail_gen;
+ }
+
+ /* the tag field will be populated in ipa3_send() function */
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].callback = ipa3_tag_destroy_imm;
+ desc[1].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+ desc[1].pyld = cmd_pyld->data;
+ desc[1].len = cmd_pyld->len;
+ desc[1].type = IPA_IMM_CMD_DESC;
+ desc[1].callback = ipa3_tx_cmd_comp;
+ desc[1].user1 = cmd_pyld;
+ desc[2].pyld = skb->data;
+ desc[2].len = skb_headlen(skb);
+ desc[2].type = IPA_DATA_DESC_SKB;
+ desc[2].callback = ipa3_tx_comp_usr_notify_release;
+ desc[2].user1 = skb;
+ desc[2].user2 = (meta && meta->pkt_init_dst_ep_valid &&
+ meta->pkt_init_dst_ep_remote) ?
+ src_ep_idx :
+ dst_ep_idx;
+ if (meta && meta->dma_address_valid) {
+ desc[2].dma_address_valid = true;
+ desc[2].dma_address = meta->dma_address;
+ }
+
+ for (f = 0; f < num_frags; f++) {
+ desc[3+f].frag = &skb_shinfo(skb)->frags[f];
+ desc[3+f].type = IPA_DATA_DESC_SKB_PAGED;
+ desc[3+f].len = skb_frag_size(desc[3+f].frag);
+ }
+ /* don't free skb till frag mappings are released */
+ if (num_frags) {
+ desc[3+f-1].callback = desc[2].callback;
+ desc[3+f-1].user1 = desc[2].user1;
+ desc[3+f-1].user2 = desc[2].user2;
+ desc[2].callback = NULL;
+ }
+
+ if (ipa3_send(sys, num_frags + 3, desc, true)) {
+ IPAERR("fail to send skb %p num_frags %u SWP\n",
+ skb, num_frags);
+ goto fail_send;
+ }
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
+ } else {
+ /* HW data path */
+ desc[0].opcode =
+ ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].callback = ipa3_tag_destroy_imm;
+ desc[1].pyld = skb->data;
+ desc[1].len = skb_headlen(skb);
+ desc[1].type = IPA_DATA_DESC_SKB;
+ desc[1].callback = ipa3_tx_comp_usr_notify_release;
+ desc[1].user1 = skb;
+ desc[1].user2 = src_ep_idx;
+
+ if (meta && meta->dma_address_valid) {
+ desc[1].dma_address_valid = true;
+ desc[1].dma_address = meta->dma_address;
+ }
+ if (num_frags == 0) {
+ if (ipa3_send(sys, 2, desc, true)) {
+ IPAERR("fail to send skb %p HWP\n", skb);
+ goto fail_gen;
+ }
+ } else {
+ for (f = 0; f < num_frags; f++) {
+ desc[2+f].frag = &skb_shinfo(skb)->frags[f];
+ desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
+ desc[2+f].len = skb_frag_size(desc[2+f].frag);
+ }
+ /* don't free skb till frag mappings are released */
+ desc[2+f-1].callback = desc[1].callback;
+ desc[2+f-1].user1 = desc[1].user1;
+ desc[2+f-1].user2 = desc[1].user2;
+ desc[1].callback = NULL;
+
+ if (ipa3_send(sys, num_frags + 2, desc, true)) {
+ IPAERR("fail to send skb %p num_frags %u HWP\n",
+ skb, num_frags);
+ goto fail_gen;
+ }
+ }
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
+ }
+
+ if (num_frags) {
+ kfree(desc);
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
+ }
+ return 0;
+
+fail_send:
+ ipahal_destroy_imm_cmd(cmd_pyld);
+fail_gen:
+ if (num_frags)
+ kfree(desc);
+fail_mem:
+ return -EFAULT;
+}
+
+static void ipa3_wq_handle_rx(struct work_struct *work)
+{
+ struct ipa3_sys_context *sys;
+
+ sys = container_of(work, struct ipa3_sys_context, work);
+
+ if (sys->ep->napi_enabled) {
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa3_handle_rx(sys);
+}
+
+static void ipa3_wq_repl_rx(struct work_struct *work)
+{
+ struct ipa3_sys_context *sys;
+ void *ptr;
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ gfp_t flag = GFP_KERNEL;
+ u32 next;
+ u32 curr;
+
+ sys = container_of(work, struct ipa3_sys_context, repl_work);
+ curr = atomic_read(&sys->repl.tail_idx);
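+ /*
+ * repl.cache is a single-producer/single-consumer ring: this work
+ * item is the producer and advances tail_idx, while
+ * ipa3_fast_replenish_rx_cache() consumes and advances head_idx.
+ * The ring is full when advancing the tail would reach the head.
+ */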
+
+begin:
+ while (1) {
+ next = (curr + 1) % sys->repl.capacity;
+ if (next == atomic_read(&sys->repl.head_idx))
+ goto fail_kmem_cache_alloc;
+
+ rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+ flag);
+ if (!rx_pkt) {
+ pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
+ __func__, sys);
+ goto fail_kmem_cache_alloc;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+ rx_pkt->sys = sys;
+
+ rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+ if (rx_pkt->data.skb == NULL) {
+ pr_err_ratelimited("%s fail alloc skb sys=%p\n",
+ __func__, sys);
+ goto fail_skb_alloc;
+ }
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+ sys->rx_buff_sz,
+ DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
+ __func__, (void *)rx_pkt->data.dma_addr,
+ ptr, sys);
+ goto fail_dma_mapping;
+ }
+
+ sys->repl.cache[curr] = rx_pkt;
+ curr = next;
+ /* ensure write is done before setting tail index */
+ mb();
+ atomic_set(&sys->repl.tail_idx, next);
+ }
+
+ return;
+
+fail_dma_mapping:
+ sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+ if (atomic_read(&sys->repl.tail_idx) ==
+ atomic_read(&sys->repl.head_idx)) {
+ if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
+ else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
+ else
+ WARN_ON(1);
+ pr_err_ratelimited("%s sys=%p repl ring empty\n",
+ __func__, sys);
+ goto begin;
+ }
+}
+
+static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
+ struct ipa3_rx_pkt_wrapper *tmp;
+ int ret;
+ struct gsi_xfer_elem gsi_xfer_elem_one;
+ u32 rx_len_cached = 0;
+
+ IPADBG_LOW("\n");
+
+ spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+ rx_len_cached = sys->len;
+
+ if (rx_len_cached < sys->rx_pool_sz) {
+ list_for_each_entry_safe(rx_pkt, tmp,
+ &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
+ list_del(&rx_pkt->link);
+
+ if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0)
+ ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ rx_pkt->len = 0;
+ rx_pkt->sys = sys;
+
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ if (ipa3_ctx->transport_prototype ==
+ IPA_TRANSPORT_TYPE_GSI) {
+ memset(&gsi_xfer_elem_one, 0,
+ sizeof(gsi_xfer_elem_one));
+ gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+ gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+ gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+ gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+ ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+ &gsi_xfer_elem_one, true);
+ } else {
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr,
+ IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);
+ }
+
+ if (ret) {
+ IPAERR("failed to provide buffer: %d\n", ret);
+ goto fail_provide_rx_buffer;
+ }
+
+ rx_len_cached = ++sys->len;
+
+ if (rx_len_cached >= sys->rx_pool_sz) {
+ spin_unlock_bh(
+ &ipa3_ctx->wc_memb.wlan_spinlock);
+ return;
+ }
+ }
+ }
+ spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+ if (rx_len_cached < sys->rx_pool_sz &&
+ ipa3_ctx->wc_memb.wlan_comm_total_cnt <
+ IPA_WLAN_COMM_RX_POOL_HIGH) {
+ ipa3_replenish_rx_cache(sys);
+ ipa3_ctx->wc_memb.wlan_comm_total_cnt +=
+ (sys->rx_pool_sz - rx_len_cached);
+ }
+
+ return;
+
+fail_provide_rx_buffer:
+ list_del(&rx_pkt->link);
+ spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+}
+
+static void ipa3_cleanup_wlan_rx_common_cache(void)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ struct ipa3_rx_pkt_wrapper *tmp;
+
+ list_for_each_entry_safe(rx_pkt, tmp,
+ &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+ IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_pkt->data.skb);
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
+ ipa3_ctx->wc_memb.wlan_comm_total_cnt--;
+ }
+ ipa3_ctx->wc_memb.total_tx_pkts_freed = 0;
+
+ if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0)
+ IPAERR("wlan comm buff free cnt: %d\n",
+ ipa3_ctx->wc_memb.wlan_comm_free_cnt);
+
+ if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0)
+ IPAERR("wlan comm buff total cnt: %d\n",
+ ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+
+}
+
+static void ipa3_alloc_wlan_rx_common_cache(u32 size)
+{
+ void *ptr;
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ int rx_len_cached = 0;
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+ rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt;
+ while (rx_len_cached < size) {
+ rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+ flag);
+ if (!rx_pkt) {
+ IPAERR("failed to alloc rx wrapper\n");
+ goto fail_kmem_cache_alloc;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+
+ rx_pkt->data.skb =
+ ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
+ flag);
+ if (rx_pkt->data.skb == NULL) {
+ IPAERR("failed to alloc skb\n");
+ goto fail_skb_alloc;
+ }
+ ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+ rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+ IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->data.dma_addr, ptr);
+ goto fail_dma_mapping;
+ }
+
+ list_add_tail(&rx_pkt->link,
+ &ipa3_ctx->wc_memb.wlan_comm_desc_list);
+ rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
+
+ ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+
+ }
+
+ return;
+
+fail_dma_mapping:
+ dev_kfree_skb_any(rx_pkt->data.skb);
+fail_skb_alloc:
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+ return;
+}
+
+
+/**
+ * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.
+ *
+ * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
+ * are sys->rx_pool_sz buffers in the cache.
+ * - Allocate a buffer in the cache
+ * - Initialize the packet's link
+ * - Initialize the packet's work struct
+ * - Allocate the packet's socket buffer (skb)
+ * - Fill the packet's skb with data
+ * - Make the packet DMAable
+ * - Add the packet to the system pipe linked list
+ * - Initiate a SPS transfer so that SPS driver will use this packet later.
+ */
+static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+ void *ptr;
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached = 0;
+ struct gsi_xfer_elem gsi_xfer_elem_one;
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+ rx_len_cached = sys->len;
+
+ while (rx_len_cached < sys->rx_pool_sz) {
+ rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+ flag);
+ if (!rx_pkt) {
+ IPAERR("failed to alloc rx wrapper\n");
+ goto fail_kmem_cache_alloc;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+ rx_pkt->sys = sys;
+
+ rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+ if (rx_pkt->data.skb == NULL) {
+ IPAERR("failed to alloc skb\n");
+ goto fail_skb_alloc;
+ }
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+ sys->rx_buff_sz,
+ DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->data.dma_addr, ptr);
+ goto fail_dma_mapping;
+ }
+
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ rx_len_cached = ++sys->len;
+
+ if (ipa3_ctx->transport_prototype ==
+ IPA_TRANSPORT_TYPE_GSI) {
+ memset(&gsi_xfer_elem_one, 0,
+ sizeof(gsi_xfer_elem_one));
+ gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+ gsi_xfer_elem_one.len = sys->rx_buff_sz;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+ gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+ gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+ ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
+ 1, &gsi_xfer_elem_one, true);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to provide buffer: %d\n",
+ ret);
+ goto fail_provide_rx_buffer;
+ }
+ } else {
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr, sys->rx_buff_sz,
+ rx_pkt, 0);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_provide_rx_buffer;
+ }
+ }
+ }
+
+ return;
+
+fail_provide_rx_buffer:
+ list_del(&rx_pkt->link);
+ rx_len_cached = --sys->len;
+ dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+ sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+ if (rx_len_cached == 0)
+ queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+ msecs_to_jiffies(1));
+}
+
+static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
+{
+ void *ptr;
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached = 0;
+ struct gsi_xfer_elem gsi_xfer_elem_one;
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+ rx_len_cached = sys->len;
+
+ while (rx_len_cached < sys->rx_pool_sz) {
+ if (list_empty(&sys->rcycl_list)) {
+ rx_pkt = kmem_cache_zalloc(
+ ipa3_ctx->rx_pkt_wrapper_cache, flag);
+ if (!rx_pkt) {
+ IPAERR("failed to alloc rx wrapper\n");
+ goto fail_kmem_cache_alloc;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+ rx_pkt->sys = sys;
+
+ rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+ if (rx_pkt->data.skb == NULL) {
+ IPAERR("failed to alloc skb\n");
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
+ rx_pkt);
+ goto fail_kmem_cache_alloc;
+ }
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+ ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->data.dma_addr, ptr);
+ goto fail_dma_mapping;
+ }
+ } else {
+ spin_lock_bh(&sys->spinlock);
+ rx_pkt = list_first_entry(&sys->rcycl_list,
+ struct ipa3_rx_pkt_wrapper, link);
+ list_del(&rx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+ ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->data.dma_addr, ptr);
+ goto fail_dma_mapping;
+ }
+ }
+
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ rx_len_cached = ++sys->len;
+ if (ipa3_ctx->transport_prototype ==
+ IPA_TRANSPORT_TYPE_GSI) {
+ memset(&gsi_xfer_elem_one, 0,
+ sizeof(gsi_xfer_elem_one));
+ gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+ gsi_xfer_elem_one.len = sys->rx_buff_sz;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+ gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+ gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+ ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
+ 1, &gsi_xfer_elem_one, true);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to provide buffer: %d\n",
+ ret);
+ goto fail_provide_rx_buffer;
+ }
+ } else {
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr, sys->rx_buff_sz,
+ rx_pkt, 0);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_provide_rx_buffer;
+ }
+ }
+ }
+
+ return;
+fail_provide_rx_buffer:
+ rx_len_cached = --sys->len;
+ list_del(&rx_pkt->link);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+ spin_lock_bh(&sys->spinlock);
+ list_add_tail(&rx_pkt->link, &sys->rcycl_list);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
+fail_kmem_cache_alloc:
+ if (rx_len_cached == 0)
+ queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+ msecs_to_jiffies(1));
+}
+
+static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached = 0;
+ struct gsi_xfer_elem gsi_xfer_elem_one;
+ u32 curr;
+
+ rx_len_cached = sys->len;
+ curr = atomic_read(&sys->repl.head_idx);
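+ /*
+ * Consume pre-allocated buffers from the repl ring (filled by
+ * ipa3_wq_repl_rx()) until the pool is full or the ring is empty.
+ */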
+
+ while (rx_len_cached < sys->rx_pool_sz) {
+ if (curr == atomic_read(&sys->repl.tail_idx))
+ break;
+
+ rx_pkt = sys->repl.cache[curr];
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+
+ if (ipa3_ctx->transport_prototype ==
+ IPA_TRANSPORT_TYPE_GSI) {
+ memset(&gsi_xfer_elem_one, 0,
+ sizeof(gsi_xfer_elem_one));
+ gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+ gsi_xfer_elem_one.len = sys->rx_buff_sz;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+ gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+ gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+ ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+ &gsi_xfer_elem_one, true);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to provide buffer: %d\n",
+ ret);
+ break;
+ }
+ } else {
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr, sys->rx_buff_sz,
+ rx_pkt, 0);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ list_del(&rx_pkt->link);
+ break;
+ }
+ }
+ rx_len_cached = ++sys->len;
+ curr = (curr + 1) % sys->repl.capacity;
+ /* ensure write is done before setting head index */
+ mb();
+ atomic_set(&sys->repl.head_idx, curr);
+ }
+
+ queue_work(sys->repl_wq, &sys->repl_work);
+
+ if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
+ if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
+ else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
+ else
+ WARN_ON(1);
+ queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+ msecs_to_jiffies(1));
+ }
+}
+
+static void ipa3_replenish_rx_work_func(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct ipa3_sys_context *sys;
+
+ dwork = container_of(work, struct delayed_work, work);
+ sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work);
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ sys->repl_hdlr(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa3_cleanup_rx() - release RX queue resources
+ * @sys: system pipe context to clean up
+ */
+static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ struct ipa3_rx_pkt_wrapper *r;
+ u32 head;
+ u32 tail;
+
+ list_for_each_entry_safe(rx_pkt, r,
+ &sys->head_desc_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ sys->free_skb(rx_pkt->data.skb);
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ }
+
+ list_for_each_entry_safe(rx_pkt, r,
+ &sys->rcycl_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ sys->free_skb(rx_pkt->data.skb);
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ }
+
+ if (sys->repl.cache) {
+ head = atomic_read(&sys->repl.head_idx);
+ tail = atomic_read(&sys->repl.tail_idx);
+ while (head != tail) {
+ rx_pkt = sys->repl.cache[head];
+ dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ sys->free_skb(rx_pkt->data.skb);
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ head = (head + 1) % sys->repl.capacity;
+ }
+ kfree(sys->repl.cache);
+ }
+}
+
+static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
+{
+ struct sk_buff *skb2 = NULL;
+
+ skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
+ if (likely(skb2)) {
+ /* Set the data pointer */
+ skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
+ memcpy(skb2->data, skb->data, len);
+ skb2->len = len;
+ skb_set_tail_pointer(skb2, len);
+ }
+
+ return skb2;
+}
+
+static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
+ struct ipa3_sys_context *sys)
+{
+ int rc = 0;
+ struct ipahal_pkt_status status;
+ u32 pkt_status_sz;
+ struct sk_buff *skb2;
+ int pad_len_byte;
+ int len;
+ unsigned char *buf;
+ int src_pipe;
+ unsigned int used = *(unsigned int *)skb->cb;
+ unsigned int used_align = ALIGN(used, 32);
+ unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+ struct ipa3_tx_pkt_wrapper *tx_pkt = NULL;
+ unsigned long ptr;
+
+ IPA_DUMP_BUFF(skb->data, 0, skb->len);
+
+ if (skb->len == 0) {
+ IPAERR("ZLT\n");
+ return rc;
+ }
+
+ if (sys->len_partial) {
+ IPADBG_LOW("len_partial %d\n", sys->len_partial);
+ buf = skb_push(skb, sys->len_partial);
+ memcpy(buf, sys->prev_skb->data, sys->len_partial);
+ sys->len_partial = 0;
+ sys->free_skb(sys->prev_skb);
+ sys->prev_skb = NULL;
+ goto begin;
+ }
+
+ /* this pipe has TX comp (status only) + mux-ed LAN RX data
+ * (status+data)
+ */
+ if (sys->len_rem) {
+ IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
+ sys->len_pad);
+ if (sys->len_rem <= skb->len) {
+ if (sys->prev_skb) {
+ skb2 = skb_copy_expand(sys->prev_skb, 0,
+ sys->len_rem, GFP_KERNEL);
+ if (likely(skb2)) {
+ memcpy(skb_put(skb2, sys->len_rem),
+ skb->data, sys->len_rem);
+ skb_trim(skb2,
+ skb2->len - sys->len_pad);
+ skb2->truesize = skb2->len +
+ sizeof(struct sk_buff);
+ if (sys->drop_packet)
+ dev_kfree_skb_any(skb2);
+ else
+ sys->ep->client_notify(
+ sys->ep->priv,
+ IPA_RECEIVE,
+ (unsigned long)(skb2));
+ } else {
+ IPAERR("copy expand failed\n");
+ }
+ dev_kfree_skb_any(sys->prev_skb);
+ }
+ skb_pull(skb, sys->len_rem);
+ sys->prev_skb = NULL;
+ sys->len_rem = 0;
+ sys->len_pad = 0;
+ } else {
+ if (sys->prev_skb) {
+ skb2 = skb_copy_expand(sys->prev_skb, 0,
+ skb->len, GFP_KERNEL);
+ if (likely(skb2)) {
+ memcpy(skb_put(skb2, skb->len),
+ skb->data, skb->len);
+ } else {
+ IPAERR("copy expand failed\n");
+ }
+ dev_kfree_skb_any(sys->prev_skb);
+ sys->prev_skb = skb2;
+ }
+ sys->len_rem -= skb->len;
+ return rc;
+ }
+ }
+
+begin:
+ pkt_status_sz = ipahal_pkt_status_get_size();
+ while (skb->len) {
+ sys->drop_packet = false;
+ IPADBG_LOW("LEN_REM %d\n", skb->len);
+
+ if (skb->len < pkt_status_sz) {
+ WARN_ON(sys->prev_skb != NULL);
+ IPADBG_LOW("status straddles buffer\n");
+ sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+ sys->len_partial = skb->len;
+ return rc;
+ }
+
+ ipahal_pkt_status_parse(skb->data, &status);
+ IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ status.status_opcode, status.endp_src_idx,
+ status.endp_dest_idx, status.pkt_len);
+ if (sys->status_stat) {
+ sys->status_stat->status[sys->status_stat->curr] =
+ status;
+ sys->status_stat->curr++;
+ if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+ sys->status_stat->curr = 0;
+ }
+
+ if ((status.status_opcode !=
+ IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
+ (status.status_opcode !=
+ IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
+ (status.status_opcode !=
+ IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) &&
+ (status.status_opcode !=
+ IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
+ IPAERR("unsupported opcode(%d)\n",
+ status.status_opcode);
+ skb_pull(skb, pkt_status_sz);
+ continue;
+ }
+ IPA_STATS_EXCP_CNT(status.exception,
+ ipa3_ctx->stats.rx_excp_pkts);
+ if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
+ status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
+ IPAERR("status fields invalid\n");
+ IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ status.status_opcode, status.endp_src_idx,
+ status.endp_dest_idx, status.pkt_len);
+ WARN_ON(1);
+ BUG();
+ }
+ if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+ IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) {
+ struct ipa3_tag_completion *comp;
+
+ IPADBG_LOW("TAG packet arrived\n");
+ if (status.tag_info == IPA_COOKIE) {
+ skb_pull(skb, pkt_status_sz);
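+				/*
+				 * the buffer carries a pointer to the
+				 * tag completion object; sizeof(comp)
+				 * is deliberately the pointer size
+				 */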
+ if (skb->len < sizeof(comp)) {
+ IPAERR("TAG arrived without packet\n");
+ return rc;
+ }
+ memcpy(&comp, skb->data, sizeof(comp));
+ skb_pull(skb, sizeof(comp) +
+ IPA_SIZE_DL_CSUM_META_TRAILER);
+ complete(&comp->comp);
+ if (atomic_dec_return(&comp->cnt) == 0)
+ kfree(comp);
+ continue;
+ } else {
+ ptr = tag_to_pointer_wa(status.tag_info);
+ tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr;
+ IPADBG_LOW("tx_pkt recv = %p\n", tx_pkt);
+ }
+ }
+ if (status.pkt_len == 0) {
+ IPADBG_LOW("Skip aggr close status\n");
+ skb_pull(skb, pkt_status_sz);
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close);
+ IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
+ [IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
+ continue;
+ }
+
+ if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) {
+ /* RX data */
+ src_pipe = status.endp_src_idx;
+
+ /*
+ * A packet which is received back to the AP after
+ * there was no route match.
+ */
+ if (status.exception ==
+ IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
+ ipahal_is_rule_miss_id(status.rt_rule_id))
+ sys->drop_packet = true;
+
+ if (skb->len == pkt_status_sz &&
+ status.exception ==
+ IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
+ WARN_ON(sys->prev_skb != NULL);
+ IPADBG_LOW("Ins header in next buffer\n");
+ sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+ sys->len_partial = skb->len;
+ return rc;
+ }
+
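+			/*
+			 * round the payload up to a 4-byte boundary,
+			 * e.g. pkt_len 61 -> pad_len_byte 3
+			 */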
+ pad_len_byte = ((status.pkt_len + 3) & ~3) -
+ status.pkt_len;
+
+ len = status.pkt_len + pad_len_byte +
+ IPA_SIZE_DL_CSUM_META_TRAILER;
+ IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
+ status.pkt_len, len);
+
+ if (status.exception ==
+ IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) {
+ IPADBG_LOW(
+ "Dropping packet on DeAggr Exception\n");
+ sys->drop_packet = true;
+ }
+
+ skb2 = ipa3_skb_copy_for_client(skb,
+ min(status.pkt_len + pkt_status_sz, skb->len));
+ if (likely(skb2)) {
+ if (skb->len < len + pkt_status_sz) {
+ IPADBG_LOW("SPL skb len %d len %d\n",
+ skb->len, len);
+ sys->prev_skb = skb2;
+ sys->len_rem = len - skb->len +
+ pkt_status_sz;
+ sys->len_pad = pad_len_byte;
+ skb_pull(skb, skb->len);
+ } else {
+ skb_trim(skb2, status.pkt_len +
+ pkt_status_sz);
+ IPADBG_LOW("rx avail for %d\n",
+ status.endp_dest_idx);
+ if (sys->drop_packet) {
+ dev_kfree_skb_any(skb2);
+ } else if (status.pkt_len >
+ IPA_GENERIC_AGGR_BYTE_LIMIT *
+ 1024) {
+ IPAERR("packet size invalid\n");
+ IPAERR("STATUS opcode=%d\n",
+ status.status_opcode);
+ IPAERR("src=%d dst=%d len=%d\n",
+ status.endp_src_idx,
+ status.endp_dest_idx,
+ status.pkt_len);
+ BUG();
+ } else {
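+						/*
+						 * charge a share of the
+						 * unused rx buffer space
+						 * to truesize
+						 */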
+ skb2->truesize = skb2->len +
+ sizeof(struct sk_buff) +
+ (ALIGN(len +
+ pkt_status_sz, 32) *
+ unused / used_align);
+ sys->ep->client_notify(
+ sys->ep->priv,
+ IPA_RECEIVE,
+ (unsigned long)(skb2));
+ }
+ skb_pull(skb, len + pkt_status_sz);
+ }
+ } else {
+ IPAERR("fail to alloc skb\n");
+ if (skb->len < len) {
+ sys->prev_skb = NULL;
+ sys->len_rem = len - skb->len +
+ pkt_status_sz;
+ sys->len_pad = pad_len_byte;
+ skb_pull(skb, skb->len);
+ } else {
+ skb_pull(skb, len + pkt_status_sz);
+ }
+ }
+ /* TX comp */
+ ipa3_wq_write_done_status(src_pipe, tx_pkt);
+ IPADBG_LOW("tx comp imp for %d\n", src_pipe);
+ } else {
+ /* TX comp */
+ ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt);
+ IPADBG_LOW("tx comp exp for %d\n",
+ status.endp_src_idx);
+ skb_pull(skb, pkt_status_sz);
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl);
+ IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
+ [IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
+ }
+	}
+
+ return rc;
+}
+
+static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb,
+ struct sk_buff *skb, unsigned int len)
+{
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(prev_skb, 0,
+ len, GFP_KERNEL);
+ if (likely(skb2)) {
+ memcpy(skb_put(skb2, len),
+ skb->data, len);
+ } else {
+ IPAERR("copy expand failed\n");
+ skb2 = NULL;
+ }
+ dev_kfree_skb_any(prev_skb);
+
+ return skb2;
+}
+
+static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
+ struct ipa3_sys_context *sys)
+{
+ struct sk_buff *skb2;
+
+ IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len);
+ if (sys->len_rem <= skb->len) {
+ if (sys->prev_skb) {
+ skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
+ sys->len_rem);
+ if (likely(skb2)) {
+				IPADBG_LOW(
+					"removing Status element from skb and sending to WAN client\n");
+ skb_pull(skb2, ipahal_pkt_status_get_size());
+ skb2->truesize = skb2->len +
+ sizeof(struct sk_buff);
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_RECEIVE,
+ (unsigned long)(skb2));
+ }
+ }
+ skb_pull(skb, sys->len_rem);
+ sys->prev_skb = NULL;
+ sys->len_rem = 0;
+ } else {
+ if (sys->prev_skb) {
+ skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
+ skb->len);
+ sys->prev_skb = skb2;
+ }
+ sys->len_rem -= skb->len;
+ skb_pull(skb, skb->len);
+ }
+}
+
+static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
+ struct ipa3_sys_context *sys)
+{
+ int rc = 0;
+ struct ipahal_pkt_status status;
+ unsigned char *skb_data;
+ u32 pkt_status_sz;
+ struct sk_buff *skb2;
+ u16 pkt_len_with_pad;
+ u32 qmap_hdr;
+ int checksum_trailer_exists;
+ int frame_len;
+ int ep_idx;
+ unsigned int used = *(unsigned int *)skb->cb;
+ unsigned int used_align = ALIGN(used, 32);
+ unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+
+ IPA_DUMP_BUFF(skb->data, 0, skb->len);
+ if (skb->len == 0) {
+ IPAERR("ZLT\n");
+ goto bail;
+ }
+
+ if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_RECEIVE, (unsigned long)(skb));
+ return rc;
+ }
+ if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
+		IPAERR("Recycle should be enabled only with GRO Aggr\n");
+ ipa_assert();
+ }
+
+	/*
+	 * the payload is split across two or more buffers;
+	 * take the start of the payload from prev_skb
+	 */
+ if (sys->len_rem)
+ ipa3_wan_rx_handle_splt_pyld(skb, sys);
+
+ pkt_status_sz = ipahal_pkt_status_get_size();
+ while (skb->len) {
+ IPADBG_LOW("LEN_REM %d\n", skb->len);
+ if (skb->len < pkt_status_sz) {
+ IPAERR("status straddles buffer\n");
+ WARN_ON(1);
+ goto bail;
+ }
+ ipahal_pkt_status_parse(skb->data, &status);
+ skb_data = skb->data;
+ IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ status.status_opcode, status.endp_src_idx,
+ status.endp_dest_idx, status.pkt_len);
+
+ if (sys->status_stat) {
+ sys->status_stat->status[sys->status_stat->curr] =
+ status;
+ sys->status_stat->curr++;
+ if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+ sys->status_stat->curr = 0;
+ }
+
+ if ((status.status_opcode !=
+ IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
+ (status.status_opcode !=
+ IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
+ (status.status_opcode !=
+ IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
+ IPAERR("unsupported opcode(%d)\n",
+ status.status_opcode);
+ skb_pull(skb, pkt_status_sz);
+ continue;
+ }
+
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
+ if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
+ status.endp_src_idx >= ipa3_ctx->ipa_num_pipes ||
+ status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
+ IPAERR("status fields invalid\n");
+ WARN_ON(1);
+ goto bail;
+ }
+ if (status.pkt_len == 0) {
+ IPADBG_LOW("Skip aggr close status\n");
+ skb_pull(skb, pkt_status_sz);
+ IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts);
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
+ continue;
+ }
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+ if (status.endp_dest_idx != ep_idx) {
+ IPAERR("expected endp_dest_idx %d received %d\n",
+ ep_idx, status.endp_dest_idx);
+ WARN_ON(1);
+ goto bail;
+ }
+ /* RX data */
+ if (skb->len == pkt_status_sz) {
+ IPAERR("Ins header in next buffer\n");
+ WARN_ON(1);
+ goto bail;
+ }
+ qmap_hdr = *(u32 *)(skb_data + pkt_status_sz);
+ /*
+ * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
+ * header
+ */
+
+ /*QMAP is BE: convert the pkt_len field from BE to LE*/
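+		/*
+		 * e.g. on a little-endian CPU, QMAP header bytes
+		 * {0x00, 0x01, 0x05, 0xDC} yield
+		 * pkt_len_with_pad = 0x05DC (1500)
+		 */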
+ pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
+ IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad);
+ /*get the CHECKSUM_PROCESS bit*/
+ checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+ IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status);
+ IPADBG_LOW("checksum_trailer_exists %d\n",
+ checksum_trailer_exists);
+
+ frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH +
+ pkt_len_with_pad;
+ if (checksum_trailer_exists)
+ frame_len += IPA_DL_CHECKSUM_LENGTH;
+ IPADBG_LOW("frame_len %d\n", frame_len);
+
+ skb2 = skb_clone(skb, GFP_KERNEL);
+ if (likely(skb2)) {
+			/*
+			 * the actual data is shorter than expected:
+			 * the payload is split across two buffers
+			 */
+ if (skb->len < frame_len) {
+ IPADBG_LOW("SPL skb len %d len %d\n",
+ skb->len, frame_len);
+ sys->prev_skb = skb2;
+ sys->len_rem = frame_len - skb->len;
+ skb_pull(skb, skb->len);
+ } else {
+ skb_trim(skb2, frame_len);
+ IPADBG_LOW("rx avail for %d\n",
+ status.endp_dest_idx);
+				IPADBG_LOW(
+					"removing Status element from skb and sending to WAN client\n");
+ skb_pull(skb2, pkt_status_sz);
+ skb2->truesize = skb2->len +
+ sizeof(struct sk_buff) +
+ (ALIGN(frame_len, 32) *
+ unused / used_align);
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_RECEIVE, (unsigned long)(skb2));
+ skb_pull(skb, frame_len);
+ }
+ } else {
+ IPAERR("fail to clone\n");
+ if (skb->len < frame_len) {
+ sys->prev_skb = NULL;
+ sys->len_rem = frame_len - skb->len;
+ skb_pull(skb, skb->len);
+ } else {
+ skb_pull(skb, frame_len);
+ }
+ }
+	}
+bail:
+ sys->free_skb(skb);
+ return rc;
+}
+
+static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags)
+{
+ return __dev_alloc_skb(len, flags);
+}
+
+static void ipa3_free_skb_rx(struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
+{
+ struct sk_buff *rx_skb = (struct sk_buff *)data;
+ struct ipahal_pkt_status status;
+ struct ipa3_ep_context *ep;
+ unsigned int src_pipe;
+ u32 metadata;
+
+ ipahal_pkt_status_parse(rx_skb->data, &status);
+ src_pipe = status.endp_src_idx;
+ metadata = status.metadata;
+ ep = &ipa3_ctx->ep[src_pipe];
+ if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
+ !ep->valid ||
+ !ep->client_notify)) {
+ IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
+ src_pipe, ep->valid, ep->client_notify);
+ dev_kfree_skb_any(rx_skb);
+ return;
+ }
+ if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
+ skb_pull(rx_skb, ipahal_pkt_status_get_size() +
+ IPA_LAN_RX_HEADER_LENGTH);
+ else
+ skb_pull(rx_skb, ipahal_pkt_status_get_size());
+
+ /* Metadata Info
+ * ------------------------------------------
+ * | 3 | 2 | 1 | 0 |
+ * | fw_desc | vdev_id | qmap mux id | Resv |
+ * ------------------------------------------
+ */
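+	/*
+	 * keep the upper 16 bits of the metadata (fw_desc and vdev_id
+	 * per the layout above) in skb->cb for the client
+	 */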
+ *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+ IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
+ metadata, *(u32 *)rx_skb->cb);
+
+ ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+}
+
+static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+ rx_pkt->data.dma_addr = 0;
+ ipa3_skb_recycle(rx_pkt->data.skb);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ spin_lock_bh(&rx_pkt->sys->spinlock);
+ list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
+ spin_unlock_bh(&rx_pkt->sys->spinlock);
+}
+
+void ipa3_recycle_wan_skb(struct sk_buff *skb)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ int ep_idx = ipa3_get_ep_mapping(
+ IPA_CLIENT_APPS_WAN_CONS);
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+ if (unlikely(ep_idx == -1)) {
+ IPAERR("dest EP does not exist\n");
+ ipa_assert();
+ }
+
+ rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+ flag);
+ if (!rx_pkt)
+ ipa_assert();
+
+ INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+ rx_pkt->sys = ipa3_ctx->ep[ep_idx].sys;
+
+ rx_pkt->data.skb = skb;
+ ipa3_recycle_rx_wrapper(rx_pkt);
+}
+
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
+ struct sk_buff *rx_skb;
+
+ if (unlikely(list_empty(&sys->head_desc_list))) {
+ WARN_ON(1);
+ return;
+ }
+ rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa3_rx_pkt_wrapper,
+ link);
+ list_del(&rx_pkt_expected->link);
+ sys->len--;
+ if (size)
+ rx_pkt_expected->len = size;
+ rx_skb = rx_pkt_expected->data.skb;
+ dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+ rx_skb->len = rx_pkt_expected->len;
+ *(unsigned int *)rx_skb->cb = rx_skb->len;
+ rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+ sys->pyld_hdlr(rx_skb, sys);
+ sys->free_rx_wrapper(rx_pkt_expected);
+ sys->repl_hdlr(sys);
+}
+
+static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
+ struct sk_buff *rx_skb;
+
+ if (unlikely(list_empty(&sys->head_desc_list))) {
+ WARN_ON(1);
+ return;
+ }
+ rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa3_rx_pkt_wrapper,
+ link);
+ list_del(&rx_pkt_expected->link);
+ sys->len--;
+
+ if (size)
+ rx_pkt_expected->len = size;
+
+ rx_skb = rx_pkt_expected->data.skb;
+ skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+ rx_skb->len = rx_pkt_expected->len;
+ rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+ sys->ep->wstats.tx_pkts_rcvd++;
+ if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
+ ipa3_free_skb(&rx_pkt_expected->data);
+ sys->ep->wstats.tx_pkts_dropped++;
+ } else {
+ sys->ep->wstats.tx_pkts_sent++;
+ sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+ (unsigned long)(&rx_pkt_expected->data));
+ }
+ ipa3_replenish_wlan_rx_cache(sys);
+}
+
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
+ struct ipa_mem_buffer *mem_info)
+{
+ IPADBG_LOW("ENTER.\n");
+ if (unlikely(list_empty(&sys->head_desc_list))) {
+ IPAERR("descriptor list is empty!\n");
+ WARN_ON(1);
+ return;
+ }
+ sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+ (unsigned long)(mem_info));
+ IPADBG_LOW("EXIT\n");
+}
+
+static void ipa3_wq_rx_avail(struct work_struct *work)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ struct ipa3_sys_context *sys;
+
+ rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work);
+	if (unlikely(rx_pkt == NULL)) {
+		WARN_ON(1);
+		return;
+	}
+ sys = rx_pkt->sys;
+ ipa3_wq_rx_common(sys, 0);
+}
+
+/**
+ * ipa3_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Rx operation is complete.
+ * Called in an interrupt context.
+ * @notify: SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to a workqueue.
+ */
+void ipa3_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ rx_pkt = notify->data.transfer.user;
+ if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+ rx_pkt->len = notify->data.transfer.iovec.size;
+ IPADBG_LOW("event %d notified sys=%p len=%u\n",
+ notify->event_id,
+ notify->user, rx_pkt->len);
+ queue_work(rx_pkt->sys->wq, &rx_pkt->work);
+ break;
+ default:
+ IPAERR("received unexpected event id %d sys=%p\n",
+ notify->event_id, notify->user);
+ }
+}
+
+static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
+ struct ipa3_sys_context *sys)
+{
+ if (sys->ep->client_notify) {
+ sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+ (unsigned long)(rx_skb));
+ } else {
+ dev_kfree_skb_any(rx_skb);
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
+static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+}
+
+static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
+ struct ipa3_sys_context *sys)
+{
+ if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
+ sys->policy = IPA_POLICY_INTR_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
+ sys->sps_callback = ipa3_sps_irq_tx_no_aggr_notify;
+ return 0;
+ }
+
+ if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) {
+ sys->policy = IPA_POLICY_NOINTR_MODE;
+ sys->sps_option = SPS_O_AUTO_ENABLE;
+ sys->sps_callback = NULL;
+ return 0;
+ }
+
+ if (IPA_CLIENT_IS_PROD(in->client)) {
+ if (sys->ep->skip_ep_cfg) {
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE|
+ SPS_O_EOT | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa3_sps_irq_tx_notify;
+ INIT_WORK(&sys->work, ipa3_wq_handle_tx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ ipa3_switch_to_intr_tx_work_func);
+ atomic_set(&sys->curr_polling_state, 0);
+ } else {
+ sys->policy = IPA_POLICY_NOINTR_MODE;
+ sys->sps_option = SPS_O_AUTO_ENABLE;
+ sys->sps_callback = NULL;
+ sys->ep->status.status_en = true;
+ sys->ep->status.status_ep = ipa3_get_ep_mapping(
+ IPA_CLIENT_APPS_LAN_CONS);
+ }
+ } else {
+ if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
+ in->client == IPA_CLIENT_APPS_WAN_CONS) {
+ sys->ep->status.status_en = true;
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa3_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ ipa3_switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ ipa3_replenish_rx_work_func);
+ INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+ IPA_GENERIC_RX_BUFF_BASE_SZ);
+ sys->get_skb = ipa3_get_skb_ipa_rx;
+ sys->free_skb = ipa3_free_skb_rx;
+ in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+ in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+ in->ipa_ep_cfg.aggr.aggr_time_limit =
+ IPA_GENERIC_AGGR_TIME_LIMIT;
+ if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+ sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache_recycle;
+ sys->free_rx_wrapper =
+ ipa3_recycle_rx_wrapper;
+ sys->rx_pool_sz =
+ ipa3_ctx->lan_rx_ring_size;
+ in->ipa_ep_cfg.aggr.aggr_byte_limit =
+ IPA_GENERIC_AGGR_BYTE_LIMIT;
+ in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+ IPA_GENERIC_AGGR_PKT_LIMIT;
+ } else if (in->client ==
+ IPA_CLIENT_APPS_WAN_CONS) {
+ sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
+ sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+ if (in->napi_enabled) {
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache_recycle;
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ } else {
+ if (nr_cpu_ids > 1) {
+ sys->repl_hdlr =
+ ipa3_fast_replenish_rx_cache;
+ } else {
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache;
+ }
+ sys->rx_pool_sz =
+ ipa3_ctx->wan_rx_ring_size;
+ }
+ in->ipa_ep_cfg.aggr.aggr_sw_eof_active
+ = true;
+				if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
+					u32 limit =
+						in->ipa_ep_cfg.aggr.aggr_byte_limit;
+
+					IPAERR("get close-by %u\n",
+						ipa_adjust_ra_buff_base_sz(limit));
+					IPAERR("set rx_buff_sz %lu\n",
+						(unsigned long)
+						IPA_GENERIC_RX_BUFF_SZ(
+						ipa_adjust_ra_buff_base_sz(limit)));
+					/* disable ipa_status */
+					sys->ep->status.status_en = false;
+					sys->rx_buff_sz =
+						IPA_GENERIC_RX_BUFF_SZ(
+						ipa_adjust_ra_buff_base_sz(limit));
+					in->ipa_ep_cfg.aggr.aggr_byte_limit =
+						sys->rx_buff_sz < limit ?
+						IPA_ADJUST_AGGR_BYTE_LIMIT(
+							sys->rx_buff_sz) :
+						IPA_ADJUST_AGGR_BYTE_LIMIT(limit);
+					IPAERR("set aggr_limit %lu\n",
+						(unsigned long)
+						in->ipa_ep_cfg.aggr.aggr_byte_limit);
+				} else {
+					in->ipa_ep_cfg.aggr.aggr_byte_limit =
+						IPA_GENERIC_AGGR_BYTE_LIMIT;
+					in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+						IPA_GENERIC_AGGR_PKT_LIMIT;
+				}
+ }
+ } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
+			IPADBG("assigning policy to client:%d\n",
+ in->client);
+
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa3_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ ipa3_switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ ipa3_replenish_rx_work_func);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
+ sys->rx_pool_sz = in->desc_fifo_sz/
+ sizeof(struct sps_iovec) - 1;
+ if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
+ sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
+ sys->pyld_hdlr = NULL;
+ sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
+ sys->get_skb = ipa3_get_skb_ipa_rx;
+ sys->free_skb = ipa3_free_skb_rx;
+ sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+ in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+ } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
+			IPADBG("assigning policy to client:%d\n",
+ in->client);
+
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa3_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ ipa3_switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ ipa3_replenish_rx_work_func);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+ sys->rx_pool_sz = in->desc_fifo_sz /
+ sizeof(struct sps_iovec) - 1;
+ if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
+ sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
+ sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
+ sys->get_skb = ipa3_get_skb_ipa_rx;
+ sys->free_skb = ipa3_free_skb_rx;
+ sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+ sys->repl_hdlr = ipa3_replenish_rx_cache;
+ } else if (in->client ==
+ IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
+			IPADBG("assigning policy to client:%d\n",
+ in->client);
+
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa3_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ ipa3_switch_to_intr_rx_work_func);
+ } else if (in->client ==
+ IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+			IPADBG("assigning policy to client:%d\n",
+ in->client);
+
+ sys->policy = IPA_POLICY_NOINTR_MODE;
+ sys->sps_option = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ } else {
+ IPAERR("Need to install a RX pipe hdlr\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ipa3_tx_client_rx_notify_release() - Callback function
+ * which will call the user supplied callback function to
+ * release the skb, or release it on its own if no callback
+ * function was supplied
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notified callback is for the destination client
+ * This function is supplied in ipa3_tx_dp_mul
+ */
+static void ipa3_tx_client_rx_notify_release(void *user1, int user2)
+{
+ struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
+ int ep_idx = user2;
+
+ IPADBG_LOW("Received data desc anchor:%p\n", dd);
+
+ atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
+ ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+
+ /* wlan host driver waits till tx complete before unload */
+ IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n",
+ ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc));
+ IPADBG_LOW("calling client notify callback with priv:%p\n",
+ ipa3_ctx->ep[ep_idx].priv);
+
+ if (ipa3_ctx->ep[ep_idx].client_notify) {
+ ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
+ IPA_WRITE_DONE, (unsigned long)user1);
+ ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++;
+ }
+}
+
+/**
+ * ipa3_tx_client_rx_pkt_status() - Callback function which
+ * increases the available fifo descriptor count
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notified callback is for the destination client
+ * This function is supplied in ipa3_tx_dp_mul
+ */
+static void ipa3_tx_client_rx_pkt_status(void *user1, int user2)
+{
+ int ep_idx = user2;
+
+ atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
+ ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+}
+
+/**
+ * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @data_desc: [in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received from the
+ * WLAN1_PROD pipe to IPA HW.
+ *
+ * The function sends the data descriptors from WLAN1_PROD (one
+ * at a time) using sps_transfer_one and sets the EOT flag on the
+ * last descriptor. Once the send is done from the SPS
+ * point of view, the IPA driver gets notified by the supplied
+ * callback - ipa3_sps_irq_tx_no_aggr_notify()
+ *
+ * ipa3_sps_irq_tx_no_aggr_notify calls the user supplied
+ * callback (from ipa3_connect)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_tx_dp_mul(enum ipa_client_type src,
+ struct ipa_tx_data_desc *data_desc)
+{
+ /* The second byte in wlan header holds qmap id */
+#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
+ struct ipa_tx_data_desc *entry;
+ struct ipa3_sys_context *sys;
+ struct ipa3_desc desc[2];
+ u32 num_desc, cnt;
+ int ep_idx;
+
+ IPADBG_LOW("Received data desc anchor:%p\n", data_desc);
+
+ spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+
+ ep_idx = ipa3_get_ep_mapping(src);
+ if (unlikely(ep_idx == -1)) {
+ IPAERR("dest EP does not exist.\n");
+ goto fail_send;
+ }
+ IPADBG_LOW("ep idx:%d\n", ep_idx);
+ sys = ipa3_ctx->ep[ep_idx].sys;
+
+ if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) {
+ IPAERR("dest EP not valid.\n");
+ goto fail_send;
+ }
+ sys->ep->wstats.rx_hd_rcvd++;
+
+ /* Calculate the number of descriptors */
+ num_desc = 0;
+ list_for_each_entry(entry, &data_desc->link, link) {
+ num_desc++;
+ }
+	IPADBG_LOW("Number of Data Descriptors:%d\n", num_desc);
+
+ if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
+ IPAERR("Insufficient data descriptors available\n");
+ goto fail_send;
+ }
+
+ /* Assign callback only for last data descriptor */
+ cnt = 0;
+ list_for_each_entry(entry, &data_desc->link, link) {
+ memset(desc, 0, 2 * sizeof(struct ipa3_desc));
+
+ IPADBG_LOW("Parsing data desc :%d\n", cnt);
+ cnt++;
+ ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
+ (u8)sys->ep->cfg.meta.qmap_id;
+
+ /* the tag field will be populated in ipa3_send() function */
+ desc[0].opcode =
+ ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].callback = ipa3_tag_destroy_imm;
+ desc[1].pyld = entry->pyld_buffer;
+ desc[1].len = entry->pyld_len;
+ desc[1].type = IPA_DATA_DESC_SKB;
+ desc[1].user1 = data_desc;
+ desc[1].user2 = ep_idx;
+ IPADBG_LOW("priv:%p pyld_buf:0x%p pyld_len:%d\n",
+ entry->priv, desc[1].pyld, desc[1].len);
+
+ /* In case of last descriptor populate callback */
+ if (cnt == num_desc) {
+ IPADBG_LOW("data desc:%p\n", data_desc);
+ desc[1].callback = ipa3_tx_client_rx_notify_release;
+ } else {
+ desc[1].callback = ipa3_tx_client_rx_pkt_status;
+ }
+
+		IPADBG_LOW("calling ipa3_send()\n");
+ if (ipa3_send(sys, 2, desc, true)) {
+ IPAERR("fail to send skb\n");
+ sys->ep->wstats.rx_pkt_leak += (cnt-1);
+ sys->ep->wstats.rx_dp_fail++;
+ goto fail_send;
+ }
+
+ if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
+ atomic_dec(&sys->ep->avail_fifo_desc);
+
+ sys->ep->wstats.rx_pkts_rcvd++;
+ IPADBG_LOW("ep=%d fifo desc=%d\n",
+ ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
+ }
+
+ sys->ep->wstats.rx_hd_processed++;
+ spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+ return 0;
+
+fail_send:
+ spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+ return -EFAULT;
+}
+
+void ipa3_free_skb(struct ipa_rx_data *data)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+ spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+ ipa3_ctx->wc_memb.total_tx_pkts_freed++;
+ rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data);
+
+ ipa3_skb_recycle(rx_pkt->data.skb);
+ (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+
+ list_add_tail(&rx_pkt->link,
+ &ipa3_ctx->wc_memb.wlan_comm_desc_list);
+ ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+
+ spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+}
+
+/* Functions added to support kernel tests */
+
+int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
+ unsigned long *ipa_bam_or_gsi_hdl,
+ u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
+{
+ struct ipa3_ep_context *ep;
+ int ipa_ep_idx;
+ int result = -EINVAL;
+
+ if (sys_in == NULL || clnt_hdl == NULL) {
+ IPAERR("NULL args\n");
+ goto fail_gen;
+ }
+
+ if (ipa_bam_or_gsi_hdl == NULL || ipa_pipe_num == NULL) {
+ IPAERR("NULL args\n");
+ goto fail_gen;
+ }
+ if (sys_in->client >= IPA_CLIENT_MAX) {
+ IPAERR("bad parm client:%d\n", sys_in->client);
+ goto fail_gen;
+ }
+
+ ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client :%d\n", sys_in->client);
+ goto fail_gen;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+ if (ep->valid == 1) {
+ if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+ IPAERR("EP %d already allocated\n", ipa_ep_idx);
+ goto fail_and_disable_clocks;
+ } else {
+ if (ipa3_cfg_ep_hdr(ipa_ep_idx,
+ &sys_in->ipa_ep_cfg.hdr)) {
+ IPAERR("fail to configure hdr prop of EP %d\n",
+ ipa_ep_idx);
+ result = -EFAULT;
+ goto fail_and_disable_clocks;
+ }
+ if (ipa3_cfg_ep_cfg(ipa_ep_idx,
+ &sys_in->ipa_ep_cfg.cfg)) {
+ IPAERR("fail to configure cfg prop of EP %d\n",
+ ipa_ep_idx);
+ result = -EFAULT;
+ goto fail_and_disable_clocks;
+ }
+ IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
+ sys_in->client, ipa_ep_idx, ep->sys);
+ ep->client_notify = sys_in->notify;
+ ep->priv = sys_in->priv;
+ *clnt_hdl = ipa_ep_idx;
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+ return 0;
+ }
+ }
+
+ memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+ ep->valid = 1;
+ ep->client = sys_in->client;
+ ep->client_notify = sys_in->notify;
+ ep->priv = sys_in->priv;
+ ep->keep_ipa_awake = true;
+ if (en_status) {
+ ep->status.status_en = true;
+ ep->status.status_ep = ipa_ep_idx;
+ }
+
+ result = ipa3_enable_data_path(ipa_ep_idx);
+ if (result) {
+ IPAERR("enable data path failed res=%d clnt=%d.\n",
+ result, ipa_ep_idx);
+ goto fail_gen2;
+ }
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto fail_gen2;
+ }
+ if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+ IPAERR("fail to configure status of EP.\n");
+ goto fail_gen2;
+ }
+ IPADBG("ep configuration successful\n");
+ } else {
+ IPADBG("skipping ep configuration\n");
+ }
+
+ *clnt_hdl = ipa_ep_idx;
+
+ *ipa_pipe_num = ipa_ep_idx;
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+ *ipa_bam_or_gsi_hdl = ipa3_ctx->gsi_dev_hdl;
+ else
+ *ipa_bam_or_gsi_hdl = ipa3_ctx->bam_handle;
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+ ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+ IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+ ipa_ep_idx, ep->sys);
+
+ return 0;
+
+fail_gen2:
+fail_and_disable_clocks:
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+ return result;
+}
+
+int ipa3_sys_teardown(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipa3_disable_data_path(clnt_hdl);
+ ep->valid = 0;
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+ return 0;
+}
+
+int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+ unsigned long gsi_ev_hdl)
+{
+ struct ipa3_ep_context *ep;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ ep->gsi_chan_hdl = gsi_ch_hdl;
+ ep->gsi_evt_ring_hdl = gsi_ev_hdl;
+
+ return 0;
+}
+
+static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+ switch (notify->evt_id) {
+ case GSI_EVT_OUT_OF_BUFFERS_ERR:
+ IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+ break;
+ case GSI_EVT_OUT_OF_RESOURCES_ERR:
+ IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+ break;
+ case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+ IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+ break;
+ case GSI_EVT_EVT_RING_EMPTY_ERR:
+ IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
+ break;
+ default:
+ IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+ }
+}
+
+static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+ switch (notify->evt_id) {
+ case GSI_CHAN_INVALID_TRE_ERR:
+ IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+ break;
+ case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+ IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+ break;
+ case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+ IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+ break;
+ case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+ IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+ break;
+ case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+ IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+ break;
+ case GSI_CHAN_HWO_1_ERR:
+ IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
+ break;
+ default:
+ IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+ }
+}
+
+static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+ struct ipa3_tx_pkt_wrapper *tx_pkt;
+
+ IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+ switch (notify->evt_id) {
+ case GSI_CHAN_EVT_EOT:
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+ tx_pkt = notify->xfer_user_data;
+ queue_work(tx_pkt->sys->wq, &tx_pkt->work);
+ break;
+ default:
+ IPAERR("received unexpected event id %d\n", notify->evt_id);
+ }
+}
+
+static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+ struct ipa3_sys_context *sys;
+ struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
+
+ if (!notify) {
+ IPAERR("gsi notify is NULL.\n");
+ return;
+ }
+ IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+ sys = (struct ipa3_sys_context *)notify->chan_user_data;
+ rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa3_rx_pkt_wrapper, link);
+ rx_pkt_rcvd = (struct ipa3_rx_pkt_wrapper *)notify->xfer_user_data;
+
+ if (rx_pkt_expected != rx_pkt_rcvd) {
+ IPAERR("Pkt was not filled in head of rx buffer.\n");
+ WARN_ON(1);
+ return;
+ }
+ sys->ep->bytes_xfered_valid = true;
+ sys->ep->bytes_xfered = notify->bytes_xfered;
+ sys->ep->phys_base = rx_pkt_rcvd->data.dma_addr;
+
+ switch (notify->evt_id) {
+ case GSI_CHAN_EVT_EOT:
+ case GSI_CHAN_EVT_EOB:
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+ if (!atomic_read(&sys->curr_polling_state)) {
+ /* put the gsi channel into polling mode */
+ gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_POLL);
+ ipa3_inc_acquire_wakelock();
+ atomic_set(&sys->curr_polling_state, 1);
+ queue_work(sys->wq, &sys->work);
+ }
+ break;
+ default:
+ IPAERR("received unexpected event id %d\n", notify->evt_id);
+ }
+}
+
+static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+ struct ipa3_sys_context *sys;
+ struct ipa3_dma_xfer_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
+
+ if (!notify) {
+ IPAERR("gsi notify is NULL.\n");
+ return;
+ }
+ IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+ sys = (struct ipa3_sys_context *)notify->chan_user_data;
+ if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+ IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n");
+ return;
+ }
+ rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+ struct ipa3_dma_xfer_wrapper, link);
+ rx_pkt_rcvd = (struct ipa3_dma_xfer_wrapper *)notify
+ ->xfer_user_data;
+ if (rx_pkt_expected != rx_pkt_rcvd) {
+ IPAERR("Pkt was not filled in head of rx buffer.\n");
+ WARN_ON(1);
+ return;
+ }
+
+ sys->ep->bytes_xfered_valid = true;
+ sys->ep->bytes_xfered = notify->bytes_xfered;
+ sys->ep->phys_base = rx_pkt_rcvd->phys_addr_dest;
+
+ switch (notify->evt_id) {
+ case GSI_CHAN_EVT_EOT:
+ if (!atomic_read(&sys->curr_polling_state)) {
+ /* put the gsi channel into polling mode */
+ gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_POLL);
+ ipa3_inc_acquire_wakelock();
+ atomic_set(&sys->curr_polling_state, 1);
+ queue_work(sys->wq, &sys->work);
+ }
+ break;
+ default:
+ IPAERR("received unexpected event id %d\n", notify->evt_id);
+ }
+}
+
+static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
+ struct ipa3_ep_context *ep)
+{
+ struct gsi_evt_ring_props gsi_evt_ring_props;
+ struct gsi_chan_props gsi_channel_props;
+ union __packed gsi_channel_scratch ch_scratch;
+ struct ipa_gsi_ep_config *gsi_ep_info;
+ dma_addr_t dma_addr;
+ int result;
+
+ if (!ep) {
+ IPAERR("EP context is empty\n");
+ return -EINVAL;
+ }
+
+ ep->gsi_evt_ring_hdl = ~0;
+ /*
+ * allocate event ring for all interrupt-policy
+ * pipes and IPA consumers pipes
+ */
+ if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
+ IPA_CLIENT_IS_CONS(ep->client)) {
+ memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+ gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
+ gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+ gsi_evt_ring_props.re_size =
+ GSI_EVT_RING_RE_SIZE_16B;
+
+ gsi_evt_ring_props.ring_len = IPA_GSI_EVT_RING_LEN;
+		gsi_evt_ring_props.ring_base_vaddr =
+			dma_alloc_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
+			&dma_addr, 0);
+		/* bail out early if the event ring cannot be allocated */
+		if (!gsi_evt_ring_props.ring_base_vaddr) {
+			IPAERR("fail to alloc evt ring base\n");
+			return -ENOMEM;
+		}
+		gsi_evt_ring_props.ring_base_addr = dma_addr;
+
+ /* copy mem info */
+ ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
+ ep->gsi_mem_info.evt_ring_base_addr =
+ gsi_evt_ring_props.ring_base_addr;
+ ep->gsi_mem_info.evt_ring_base_vaddr =
+ gsi_evt_ring_props.ring_base_vaddr;
+
+ gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
+ gsi_evt_ring_props.int_modc = 1;
+ gsi_evt_ring_props.rp_update_addr = 0;
+ gsi_evt_ring_props.exclusive = true;
+ gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
+ gsi_evt_ring_props.user_data = NULL;
+
+ result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+ ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl);
+ if (result != GSI_STATUS_SUCCESS)
+ goto fail_alloc_evt_ring;
+ }
+
+ memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+ gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+ } else {
+ gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+ gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
+ }
+
+ gsi_ep_info = ipa3_get_gsi_ep_info(ipa3_get_ep_mapping(ep->client));
+ if (!gsi_ep_info) {
+ IPAERR("Invalid ep number\n");
+ result = -EINVAL;
+ goto fail_alloc_evt_ring;
+ } else
+ gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
+
+ gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+ gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+
+ /*
+ * GSI ring length is calculated based on the desc_fifo_sz which was
+ * meant to define the BAM desc fifo. GSI descriptors are 16B as opposed
+ * to 8B for BAM. For PROD pipes there is also an additional descriptor
+ * for TAG STATUS immediate command.
+ */
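+	/*
+	 * e.g. a 2KB BAM desc_fifo_sz maps to an 8KB GSI ring for PROD
+	 * pipes and a 4KB ring for CONS pipes
+	 */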
+ if (IPA_CLIENT_IS_PROD(ep->client))
+ gsi_channel_props.ring_len = 4 * in->desc_fifo_sz;
+ else
+ gsi_channel_props.ring_len = 2 * in->desc_fifo_sz;
+	gsi_channel_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+			&dma_addr, 0);
+	/* release the event ring if the transfer ring cannot be allocated */
+	if (!gsi_channel_props.ring_base_vaddr) {
+		IPAERR("fail to alloc chan ring base\n");
+		result = -ENOMEM;
+		goto fail_alloc_channel;
+	}
+	gsi_channel_props.ring_base_addr = dma_addr;
+
+ /* copy mem info */
+ ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+ ep->gsi_mem_info.chan_ring_base_addr =
+ gsi_channel_props.ring_base_addr;
+ ep->gsi_mem_info.chan_ring_base_vaddr =
+ gsi_channel_props.ring_base_vaddr;
+
+ gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+ gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+ if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
+ gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
+ else
+ gsi_channel_props.low_weight = 1;
+ gsi_channel_props.chan_user_data = ep->sys;
+ gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
+ if (IPA_CLIENT_IS_PROD(ep->client))
+ gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
+ else
+ gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb;
+ if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
+ gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb;
+ result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+ &ep->gsi_chan_hdl);
+ if (result != GSI_STATUS_SUCCESS)
+ goto fail_alloc_channel;
+
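+	/*
+	 * scratch limits are in bytes and scale with the 16B TRE size:
+	 * allow up to ipa_if_tlv outstanding TREs, with a two-TRE
+	 * threshold
+	 */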
+ memset(&ch_scratch, 0, sizeof(ch_scratch));
+ ch_scratch.gpi.max_outstanding_tre = gsi_ep_info->ipa_if_tlv *
+ GSI_CHAN_RE_SIZE_16B;
+ ch_scratch.gpi.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B;
+ result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to write scratch %d\n", result);
+ goto fail_start_channel;
+ }
+
+ result = gsi_start_channel(ep->gsi_chan_hdl);
+ if (result != GSI_STATUS_SUCCESS)
+ goto fail_start_channel;
+ if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
+ gsi_config_channel_mode(ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_POLL);
+ return 0;
+
+fail_start_channel:
+ if (gsi_dealloc_channel(ep->gsi_chan_hdl)
+ != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to dealloc GSI chan.\n");
+ BUG();
+ }
+fail_alloc_channel:
+ if (ep->gsi_evt_ring_hdl != ~0) {
+ gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+ ep->gsi_evt_ring_hdl = ~0;
+ }
+fail_alloc_evt_ring:
+ IPAERR("Return with err: %d\n", result);
+ return result;
+}
+
+static int ipa_populate_tag_field(struct ipa3_desc *desc,
+ struct ipa3_tx_pkt_wrapper *tx_pkt,
+ struct ipahal_imm_cmd_pyld **tag_pyld_ret)
+{
+ struct ipahal_imm_cmd_pyld *tag_pyld;
+ struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0};
+
+ /* populate tag field only if it is NULL */
+ if (desc->pyld == NULL) {
+ tag_cmd.tag = pointer_to_tag_wa(tx_pkt);
+ tag_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true);
+ if (unlikely(!tag_pyld)) {
+ IPAERR("Failed to construct ip_packet_tag_status\n");
+ return -EFAULT;
+ }
+ /*
+ * This is for 32-bit pointer, will need special
+ * handling if 64-bit pointer is used
+ */
+ IPADBG_LOW("tx_pkt sent in tag: 0x%p\n", tx_pkt);
+ desc->pyld = tag_pyld->data;
+ desc->len = tag_pyld->len;
+ desc->user1 = tag_pyld;
+
+ *tag_pyld_ret = tag_pyld;
+ }
+ return 0;
+}
+
+static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
+ struct ipa_mem_buffer *mem_info)
+{
+ int ret;
+ struct gsi_chan_xfer_notify xfer_notify;
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+ if (sys->ep->bytes_xfered_valid) {
+ mem_info->phys_base = sys->ep->phys_base;
+ mem_info->size = (u32)sys->ep->bytes_xfered;
+ sys->ep->bytes_xfered_valid = false;
+ return GSI_STATUS_SUCCESS;
+ }
+
+ ret = gsi_poll_channel(sys->ep->gsi_chan_hdl,
+ &xfer_notify);
+ if (ret == GSI_STATUS_POLL_EMPTY)
+ return ret;
+ else if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("Poll channel err: %d\n", ret);
+ return ret;
+ }
+
+ rx_pkt = (struct ipa3_rx_pkt_wrapper *)
+ xfer_notify.xfer_user_data;
+ mem_info->phys_base = rx_pkt->data.dma_addr;
+ mem_info->size = xfer_notify.bytes_xfered;
+
+ return ret;
+}
+
+static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
+ bool process_all, bool in_poll_state)
+{
+ int ret;
+ int cnt = 0;
+ struct ipa_mem_buffer mem_info = {0};
+
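+	/*
+	 * keep draining while the pipe remains in the state we entered
+	 * with; stop once another context flips curr_polling_state
+	 */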
+ while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+ !atomic_read(&sys->curr_polling_state))) {
+ if (cnt && !process_all)
+ break;
+
+ ret = ipa_poll_gsi_pkt(sys, &mem_info);
+ if (ret)
+ break;
+
+ if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+ ipa3_dma_memcpy_notify(sys, &mem_info);
+ else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+ ipa3_wlan_wq_rx_common(sys, mem_info.size);
+ else
+ ipa3_wq_rx_common(sys, mem_info.size);
+
+ cnt++;
+ }
+ return cnt;
+}
+
+static int ipa_poll_sps_pkt(struct ipa3_sys_context *sys,
+ struct ipa_mem_buffer *mem_info)
+{
+ int ret;
+ struct sps_iovec iov;
+
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ return ret;
+ }
+
+ if (iov.addr == 0)
+ return -EIO;
+
+ mem_info->phys_base = iov.addr;
+ mem_info->size = iov.size;
+ return 0;
+}
+
+static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
+ bool process_all, bool in_poll_state)
+{
+ int ret;
+ int cnt = 0;
+ struct ipa_mem_buffer mem_info = {0};
+
+ while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+ !atomic_read(&sys->curr_polling_state))) {
+ if (cnt && !process_all)
+ break;
+
+ ret = ipa_poll_sps_pkt(sys, &mem_info);
+ if (ret)
+ break;
+
+ if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+ ipa3_dma_memcpy_notify(sys, &mem_info);
+ else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+ ipa3_wlan_wq_rx_common(sys, mem_info.size);
+ else
+ ipa3_wq_rx_common(sys, mem_info.size);
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+/**
+ * ipa3_rx_poll() - Poll the rx packets from IPA HW. This
+ * function is executed in the softirq context
+ *
+ * if input budget is zero, the driver switches back to
+ * interrupt mode
+ *
+ * return number of polled packets, on error 0 (zero)
+ */
+int ipa3_rx_poll(u32 clnt_hdl, int weight)
+{
+ struct ipa3_ep_context *ep;
+ int ret;
+ int cnt = 0;
+ unsigned int delay = 1;
+ struct ipa_mem_buffer mem_info = {0};
+
+ IPADBG("\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm 0x%x\n", clnt_hdl);
+ return cnt;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ while (cnt < weight &&
+ atomic_read(&ep->sys->curr_polling_state)) {
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+ ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
+ else
+ ret = ipa_poll_sps_pkt(ep->sys, &mem_info);
+
+ if (ret)
+ break;
+
+ ipa3_wq_rx_common(ep->sys, mem_info.size);
+ cnt += 5;
+	}
+
+ if (cnt == 0) {
+ ep->inactive_cycles++;
+ ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
+
+ if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
+ ep->switch_to_intr = true;
+ delay = 0;
+ }
+ queue_delayed_work(ep->sys->wq,
+ &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
+ } else
+ ep->inactive_cycles = 0;
+
+ return cnt;
+}
+
+static unsigned long tag_to_pointer_wa(uint64_t tag)
+{
+ return 0xFFFF000000000000 | (unsigned long) tag;
+}
+
+static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+ u16 temp;
+ /* Add the check but it might have throughput issue */
+ if (ipa3_is_msm_device()) {
+ temp = (u16) (~((unsigned long) tx_pkt &
+ 0xFFFF000000000000) >> 48);
+ if (temp) {
+			IPAERR("The 16-bit prefix is not all 1s (%p)\n",
+				tx_pkt);
+ BUG();
+ }
+ }
+ return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF;
+}
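+
+/*
+ * Illustrative round trip of the tag workaround, assuming a kernel
+ * virtual address whose top 16 bits are all ones (as on MSM targets):
+ *   tx_pkt                    = 0xFFFFFFC012345678
+ *   pointer_to_tag_wa(tx_pkt) = 0x0000FFC012345678
+ *   tag_to_pointer_wa(tag)    = 0xFFFFFFC012345678
+ */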
+
+/**
+ * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20
+ *
+ * A hardware limitation requires avoiding the use of GSI physical
+ * channel 20. This function allocates GSI physical channel 20 and
+ * holds it to prevent others from using it.
+ *
+ * Return codes: 0 on success, negative on failure
+ */
+int ipa_gsi_ch20_wa(void)
+{
+ struct gsi_chan_props gsi_channel_props;
+ dma_addr_t dma_addr;
+ int result;
+ int i;
+ unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC];
+ unsigned long chan_hdl_to_keep;
+
+ memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+ gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
+ gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+ gsi_channel_props.evt_ring_hdl = ~0;
+ gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size;
+	gsi_channel_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+			&dma_addr, 0);
+	/* nothing to unwind yet if the ring cannot be allocated */
+	if (!gsi_channel_props.ring_base_vaddr) {
+		IPAERR("fail to alloc ring base\n");
+		return -ENOMEM;
+	}
+	gsi_channel_props.ring_base_addr = dma_addr;
+ gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+ gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+ gsi_channel_props.low_weight = 1;
+ gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
+ gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
+
+ /* first allocate channels up to channel 20 */
+ for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+ gsi_channel_props.ch_id = i;
+ result = gsi_alloc_channel(&gsi_channel_props,
+ ipa3_ctx->gsi_dev_hdl,
+ &chan_hdl[i]);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to alloc channel %d err %d\n",
+ i, result);
+ return result;
+ }
+ }
+
+ /* allocate channel 20 */
+ gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN;
+ result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+ &chan_hdl_to_keep);
+ if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to alloc channel %d err %d\n",
+			IPA_GSI_CH_20_WA_VIRT_CHAN, result);
+ return result;
+ }
+
+ /* release all other channels */
+ for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+ result = gsi_dealloc_channel(chan_hdl[i]);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to dealloc channel %d err %d\n",
+ i, result);
+ return result;
+ }
+ }
+
+ /* DMA memory shall not be freed as it is used by channel 20 */
+ return 0;
+}
+
+/**
+ * ipa_adjust_ra_buff_base_sz() - adjust the RA buffer base size
+ * @aggr_byte_limit: aggregation byte limit to adjust against
+ *
+ * Return value: the largest power of two that is smaller than
+ * (aggr_byte_limit + IPA_MTU + IPA_GENERIC_RX_BUFF_LIMIT)
+ */
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
+{
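+	/*
+	 * classic bit-smearing trick: round the adjusted limit up to
+	 * the next power of two, then halve it; e.g. an adjusted value
+	 * of 5000 is smeared up to 8192 and 4096 is returned
+	 */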
+ aggr_byte_limit += IPA_MTU;
+ aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
+ aggr_byte_limit--;
+ aggr_byte_limit |= aggr_byte_limit >> 1;
+ aggr_byte_limit |= aggr_byte_limit >> 2;
+ aggr_byte_limit |= aggr_byte_limit >> 4;
+ aggr_byte_limit |= aggr_byte_limit >> 8;
+ aggr_byte_limit |= aggr_byte_limit >> 16;
+ aggr_byte_limit++;
+ return aggr_byte_limit >> 1;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
new file mode 100644
index 0000000..e7af53f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -0,0 +1,1592 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
+#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)
+
+#define IPA_FLT_GET_RULE_TYPE(__entry) \
+ ( \
+ ((__entry)->rule.hashable) ? \
+ (IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \
+ )
+
+/**
+ * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer. buf == NULL means the caller only wants to
+ * know the size of the rule as seen by HW, so no valid buffer is
+ * passed and a scratch buffer is used instead. With this scheme the
+ * rule is generated twice: once to learn its size using the scratch
+ * buffer, and a second time to write the rule into the caller
+ * supplied buffer of the required size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
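+ * A sketch of the intended two-pass usage (illustrative pseudo-caller;
+ * allocate() is a stand-in for whatever allocation the caller uses):
+ *
+ *	ipa3_generate_flt_hw_rule(ip, entry, NULL); - sets entry->hw_len
+ *	buf = allocate(entry->hw_len);
+ *	ipa3_generate_flt_hw_rule(ip, entry, buf); - writes the rule
+ *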
+ */
+static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
+ struct ipa3_flt_entry *entry, u8 *buf)
+{
+ struct ipahal_flt_rule_gen_params gen_params;
+ int res = 0;
+
+ memset(&gen_params, 0, sizeof(gen_params));
+
+ gen_params.ipt = ip;
+ if (entry->rt_tbl)
+ gen_params.rt_tbl_idx = entry->rt_tbl->idx;
+ else
+ gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
+
+ gen_params.priority = entry->prio;
+ gen_params.id = entry->rule_id;
+ gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;
+
+	res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+	if (res)
+		IPAERR("failed to generate flt h/w rule\n");
+
+	return res;
+}
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
+{
+ struct ipa3_flt_tbl *tbl;
+ int i;
+
+ IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
+
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (!ipa_is_ep_support_flt(i))
+ continue;
+
+ tbl = &ipa3_ctx->flt_tbl[i][ip];
+ if (tbl->prev_mem[rlt].phys_base) {
+ IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
+ ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
+ }
+
+ if (list_empty(&tbl->head_flt_rule_list)) {
+ if (tbl->curr_mem[rlt].phys_base) {
+ IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
+ i);
+ ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
+ }
+ }
+ }
+}
+
+/**
+ * ipa_prep_flt_tbl_for_cmt() - prepare the flt table for commit:
+ * assign priorities to the rules and calculate their sizes and the
+ * overall table size
+ * @ip: the ip address family type
+ * @tbl: the flt tbl to be prepared
+ * @pipe_idx: the ep pipe appropriate for the given tbl
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
+ struct ipa3_flt_tbl *tbl, int pipe_idx)
+{
+ struct ipa3_flt_entry *entry;
+ int prio_i;
+ int max_prio;
+ u32 hdr_width;
+
+ tbl->sz[IPA_RULE_HASHABLE] = 0;
+ tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
+
+ max_prio = ipahal_get_rule_max_priority();
+
+ prio_i = max_prio;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+
+ if (entry->rule.max_prio) {
+ entry->prio = max_prio;
+ } else {
+ if (ipahal_rule_decrease_priority(&prio_i)) {
+ IPAERR("cannot decrease rule priority - %d\n",
+ prio_i);
+ return -EPERM;
+ }
+ entry->prio = prio_i;
+ }
+
+ if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to calculate HW FLT rule size\n");
+ return -EPERM;
+ }
+ IPADBG("pipe %d rule_id (handle) %u hw_len %d priority %u\n",
+ pipe_idx, entry->rule_id, entry->hw_len, entry->prio);
+
+ if (entry->rule.hashable)
+ tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
+ else
+ tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
+ }
+
+ if ((tbl->sz[IPA_RULE_HASHABLE] +
+ tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
+		IPADBG_LOW("flt tbl for pipe %d has zero total size\n",
+			pipe_idx);
+ return 0;
+ }
+
+ hdr_width = ipahal_get_hw_tbl_hdr_width();
+
+ /* for the header word */
+ if (tbl->sz[IPA_RULE_HASHABLE])
+ tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
+ if (tbl->sz[IPA_RULE_NON_HASHABLE])
+ tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
+
+ IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
+ tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
+
+ return 0;
+}
+
+/**
+ * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
+ * (rules and tables) to HW format and fill them into the given buffers
+ * @ip: the ip address family type
+ * @rlt: the type of the rules to translate (hashable or non-hashable)
+ * @base: the rules body buffer to be filled
+ * @hdr: the rules header (addresses/offsets) buffer to be filled
+ * @body_ofst: the offset of the rules body from the rules header at
+ * ipa sram
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
+ enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst)
+{
+ u64 offset;
+ u8 *body_i;
+ int res;
+ struct ipa3_flt_entry *entry;
+ u8 *tbl_mem_buf;
+ struct ipa_mem_buffer tbl_mem;
+ struct ipa3_flt_tbl *tbl;
+ int i;
+ int hdr_idx = 0;
+
+ body_i = base;
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (!ipa_is_ep_support_flt(i))
+ continue;
+ tbl = &ipa3_ctx->flt_tbl[i][ip];
+ if (tbl->sz[rlt] == 0) {
+ hdr_idx++;
+ continue;
+ }
+ if (tbl->in_sys[rlt]) {
+ /* only body (no header) */
+ tbl_mem.size = tbl->sz[rlt] -
+ ipahal_get_hw_tbl_hdr_width();
+ if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+ IPAERR("fail to alloc sys tbl of size %d\n",
+ tbl_mem.size);
+ goto err;
+ }
+
+ if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+ hdr, hdr_idx, true)) {
+ IPAERR("fail to wrt sys tbl addr to hdr\n");
+ goto hdr_update_fail;
+ }
+
+ tbl_mem_buf = tbl_mem.base;
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_flt_rule_list,
+ link) {
+ if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
+ continue;
+ res = ipa3_generate_flt_hw_rule(
+ ip, entry, tbl_mem_buf);
+ if (res) {
+ IPAERR("failed to gen HW FLT rule\n");
+ goto hdr_update_fail;
+ }
+ tbl_mem_buf += entry->hw_len;
+ }
+
+ if (tbl->curr_mem[rlt].phys_base) {
+ WARN_ON(tbl->prev_mem[rlt].phys_base);
+ tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
+ }
+ tbl->curr_mem[rlt] = tbl_mem;
+ } else {
+ offset = body_i - base + body_ofst;
+
+ /* update the hdr at the right index */
+ if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+ hdr_idx, true)) {
+ IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+ goto hdr_update_fail;
+ }
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_flt_rule_list,
+ link) {
+ if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
+ continue;
+ res = ipa3_generate_flt_hw_rule(
+ ip, entry, body_i);
+ if (res) {
+ IPAERR("failed to gen HW FLT rule\n");
+ goto err;
+ }
+ body_i += entry->hw_len;
+ }
+
+			/*
+			 * advance body_i to the next table alignment
+			 * boundary, as local tables are ordered
+			 * back-to-back; the value returned by
+			 * ipahal_get_lcl_tbl_addr_alignment() is used
+			 * as an alignment mask
+			 */
+			body_i += ipahal_get_lcl_tbl_addr_alignment();
+			body_i = (u8 *)((long)body_i &
+				~ipahal_get_lcl_tbl_addr_alignment());
+ }
+ hdr_idx++;
+ }
+
+ return 0;
+
+hdr_update_fail:
+ ipahal_free_dma_mem(&tbl_mem);
+err:
+ return -EPERM;
+}
+
+/**
+ * ipa_generate_flt_hw_tbl_img() - generate the flt hw tbls.
+ * Headers and bodies are created in DMA buffers that will later be
+ * copied into local memory (sram)
+ * @ip: the ip address family type
+ * @alloc_params: in/out parameters for the allocation of the buffers -
+ *	4 buffers: hdr and bdy, each hashable and non-hashable
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
+ struct ipahal_fltrt_alloc_imgs_params *alloc_params)
+{
+ u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
+ int rc = 0;
+
+ if (ip == IPA_IP_v4) {
+ nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
+ IPA_MEM_PART(v4_flt_nhash_ofst);
+ hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) -
+ IPA_MEM_PART(v4_flt_hash_ofst);
+ } else {
+ nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) -
+ IPA_MEM_PART(v6_flt_nhash_ofst);
+ hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) -
+ IPA_MEM_PART(v6_flt_hash_ofst);
+ }
+
+ if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+ IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip);
+ rc = -ENOMEM;
+ goto allocate_failed;
+ }
+
+ if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
+ alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
+ hash_bdy_start_ofst)) {
+ IPAERR("fail to translate hashable flt tbls to hw format\n");
+ rc = -EPERM;
+ goto translate_fail;
+ }
+ if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
+ alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
+ nhash_bdy_start_ofst)) {
+ IPAERR("fail to translate non-hash flt tbls to hw format\n");
+ rc = -EPERM;
+ goto translate_fail;
+ }
+
+ return rc;
+
+translate_fail:
+ if (alloc_params->hash_hdr.size)
+ ipahal_free_dma_mem(&alloc_params->hash_hdr);
+ ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+ if (alloc_params->hash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params->hash_bdy);
+ if (alloc_params->nhash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_failed:
+ return rc;
+}
+
+/**
+ * ipa_flt_valid_lcl_tbl_size() - validate that the space allocated for flt
+ * tbl bodies at the sram is enough for the commit
+ * @ipt: the ip address family type
+ * @rlt: the rule type (hashable or non-hashable)
+ * @bdy: the table body buffer to check against the available space
+ *
+ * Return: true if enough space is available, false otherwise
+ */
+static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
+ enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
+{
+ u16 avail;
+
+ if (!bdy) {
+ IPAERR("Bad parameters, bdy = NULL\n");
+ return false;
+ }
+
+ if (ipt == IPA_IP_v4)
+ avail = (rlt == IPA_RULE_HASHABLE) ?
+ IPA_MEM_PART(apps_v4_flt_hash_size) :
+ IPA_MEM_PART(apps_v4_flt_nhash_size);
+ else
+ avail = (rlt == IPA_RULE_HASHABLE) ?
+ IPA_MEM_PART(apps_v6_flt_hash_size) :
+ IPA_MEM_PART(apps_v6_flt_nhash_size);
+
+ if (bdy->size <= avail)
+ return true;
+
+ IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
+ bdy->size, avail, ipt, rlt);
+ return false;
+}
+
+/**
+ * ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmd
+ * payload pointer buffers for the headers and bodies of the flt
+ * structure, as well as a slot for the flush imm cmd
+ * @ip: the ip address family type
+ * @desc: [OUT] descriptor buffer
+ * @cmd_pyld: [OUT] imm commands payload pointers buffer
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip,
+ struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
+{
+ u16 entries;
+
+	/*
+	 * 2 commands per filtering EP (hashable and non-hashable header
+	 * words); +3: 2 for the bodies (hashable and non-hashable) and
+	 * 1 for flushing
+	 */
+	entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
+
+ *desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
+ if (*desc == NULL) {
+ IPAERR("fail to alloc desc blob ip %d\n", ip);
+ goto fail_desc_alloc;
+ }
+
+ *cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
+ if (*cmd_pyld == NULL) {
+ IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
+ goto fail_cmd_alloc;
+ }
+
+ return 0;
+
+fail_cmd_alloc:
+ kfree(*desc);
+fail_desc_alloc:
+ return -ENOMEM;
+}
+
+/**
+ * ipa_flt_skip_pipe_config() - should the ep flt configuration be skipped?
+ * Skips according to the pre-configuration or for modem-owned pipes
+ * @pipe: the EP pipe index
+ *
+ * Return: true if the pipe should be skipped, false otherwise
+ */
+static bool ipa_flt_skip_pipe_config(int pipe)
+{
+ if (ipa_is_modem_pipe(pipe)) {
+ IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
+ return true;
+ }
+
+ if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
+ IPADBG_LOW("skip %d\n", pipe);
+ return true;
+ }
+
+ if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == pipe
+ && ipa3_ctx->modem_cfg_emb_pipe_flt)) {
+ IPADBG_LOW("skip %d\n", pipe);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * __ipa_commit_flt_v3() - commit flt tables to the hw.
+ * Commit the headers and, if they are local, the bodies, with internal
+ * cache flushing. The headers (and local bodies) are first created in
+ * dma buffers and then written via immediate commands to the SRAM
+ * @ip: the ip address family type
+ *
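+ * The descriptor chain built below has the following layout (sketch;
+ * pipes that are skipped add no commands and body commands exist only
+ * for tables residing in local memory):
+ *	desc[0]: REGISTER_WRITE flushing the hash cache
+ *	desc[1..]: per filtering pipe, two DMA_SHARED_MEM commands for
+ *		the non-hashable and hashable header words
+ *	last descs: DMA_SHARED_MEM for the local non-hashable and
+ *		hashable bodies, when present
+ *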
+ * Return: 0 on success, negative on failure
+ */
+int __ipa_commit_flt_v3(enum ipa_ip_type ip)
+{
+ struct ipahal_fltrt_alloc_imgs_params alloc_params;
+ int rc = 0;
+ struct ipa3_desc *desc;
+ struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+ struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
+ struct ipahal_imm_cmd_pyld **cmd_pyld;
+ int num_cmd = 0;
+ int i;
+ int hdr_idx;
+ u32 lcl_hash_hdr, lcl_nhash_hdr;
+ u32 lcl_hash_bdy, lcl_nhash_bdy;
+ bool lcl_hash, lcl_nhash;
+ struct ipahal_reg_fltrt_hash_flush flush;
+ struct ipahal_reg_valmask valmask;
+ u32 tbl_hdr_width;
+ struct ipa3_flt_tbl *tbl;
+
+ tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
+ memset(&alloc_params, 0, sizeof(alloc_params));
+ alloc_params.ipt = ip;
+ alloc_params.tbls_num = ipa3_ctx->ep_flt_num;
+
+ if (ip == IPA_IP_v4) {
+ lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_flt_hash_ofst) +
+ tbl_hdr_width; /* to skip the bitmap */
+ lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_flt_nhash_ofst) +
+ tbl_hdr_width; /* to skip the bitmap */
+ lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v4_flt_hash_ofst);
+ lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v4_flt_nhash_ofst);
+ lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
+ lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+ } else {
+ lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_flt_hash_ofst) +
+ tbl_hdr_width; /* to skip the bitmap */
+ lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_flt_nhash_ofst) +
+ tbl_hdr_width; /* to skip the bitmap */
+ lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v6_flt_hash_ofst);
+ lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v6_flt_nhash_ofst);
+ lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
+ lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+ }
+
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (!ipa_is_ep_support_flt(i))
+ continue;
+ tbl = &ipa3_ctx->flt_tbl[i][ip];
+ if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
+ rc = -EPERM;
+ goto prep_failed;
+ }
+ if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+ tbl->sz[IPA_RULE_HASHABLE]) {
+ alloc_params.num_lcl_hash_tbls++;
+ alloc_params.total_sz_lcl_hash_tbls +=
+ tbl->sz[IPA_RULE_HASHABLE];
+ alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+		}
+ if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+ tbl->sz[IPA_RULE_NON_HASHABLE]) {
+ alloc_params.num_lcl_nhash_tbls++;
+ alloc_params.total_sz_lcl_nhash_tbls +=
+ tbl->sz[IPA_RULE_NON_HASHABLE];
+ alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+ }
+ }
+
+ if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
+ IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip);
+ rc = -EFAULT;
+ goto prep_failed;
+ }
+
+ if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+ &alloc_params.hash_bdy)) {
+ rc = -EFAULT;
+ goto fail_size_valid;
+ }
+ if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
+ &alloc_params.nhash_bdy)) {
+ rc = -EFAULT;
+ goto fail_size_valid;
+ }
+
+ if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) {
+ rc = -ENOMEM;
+ goto fail_size_valid;
+ }
+
+ /* flushing ipa internal hashable flt rules cache */
+ memset(&flush, 0, sizeof(flush));
+ if (ip == IPA_IP_v4)
+ flush.v4_flt = true;
+ else
+ flush.v6_flt = true;
+ ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+ reg_write_cmd.skip_pipeline_clear = false;
+ reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+ reg_write_cmd.value = valmask.val;
+ reg_write_cmd.value_mask = valmask.mask;
+ cmd_pyld[0] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+ if (!cmd_pyld[0]) {
+ IPAERR("fail construct register_write imm cmd: IP %d\n", ip);
+ rc = -EFAULT;
+ goto fail_reg_write_construct;
+ }
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[0].pyld = cmd_pyld[0]->data;
+ desc[0].len = cmd_pyld[0]->len;
+ desc[0].type = IPA_IMM_CMD_DESC;
+ num_cmd++;
+
+ hdr_idx = 0;
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (!ipa_is_ep_support_flt(i)) {
+ IPADBG_LOW("skip %d - not filtering pipe\n", i);
+ continue;
+ }
+
+ if (ipa_flt_skip_pipe_config(i)) {
+ hdr_idx++;
+ continue;
+ }
+
+ IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
+ hdr_idx, i);
+
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = tbl_hdr_width;
+ mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
+ hdr_idx * tbl_hdr_width;
+ mem_cmd.local_addr = lcl_nhash_hdr +
+ hdr_idx * tbl_hdr_width;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+ desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = tbl_hdr_width;
+ mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
+ hdr_idx * tbl_hdr_width;
+ mem_cmd.local_addr = lcl_hash_hdr +
+ hdr_idx * tbl_hdr_width;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+ desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+
+ hdr_idx++;
+ }
+
+ if (lcl_nhash) {
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = alloc_params.nhash_bdy.size;
+ mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
+ mem_cmd.local_addr = lcl_nhash_bdy;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+ desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+ }
+ if (lcl_hash) {
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = alloc_params.hash_bdy.size;
+ mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
+ mem_cmd.local_addr = lcl_hash_bdy;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+ desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+ }
+
+ if (ipa3_send_cmd(num_cmd, desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ goto fail_imm_cmd_construct;
+ }
+
+ IPADBG_LOW("Hashable HEAD\n");
+ IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+ alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
+
+ IPADBG_LOW("Non-Hashable HEAD\n");
+ IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+ alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
+
+ if (alloc_params.hash_bdy.size) {
+ IPADBG_LOW("Hashable BODY\n");
+ IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+ alloc_params.hash_bdy.phys_base,
+ alloc_params.hash_bdy.size);
+ }
+
+ if (alloc_params.nhash_bdy.size) {
+ IPADBG_LOW("Non-Hashable BODY\n");
+ IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+ alloc_params.nhash_bdy.phys_base,
+ alloc_params.nhash_bdy.size);
+ }
+
+ __ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
+ __ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);
+
+fail_imm_cmd_construct:
+	for (i = 0; i < num_cmd; i++)
+ ipahal_destroy_imm_cmd(cmd_pyld[i]);
+fail_reg_write_construct:
+ kfree(desc);
+ kfree(cmd_pyld);
+fail_size_valid:
+ if (alloc_params.hash_hdr.size)
+ ipahal_free_dma_mem(&alloc_params.hash_hdr);
+ ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+ if (alloc_params.hash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params.hash_bdy);
+ if (alloc_params.nhash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+prep_failed:
+ return rc;
+}
+
+static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
+ struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
+{
+ if (rule->action != IPA_PASS_TO_EXCEPTION) {
+ if (!rule->eq_attrib_type) {
+ if (!rule->rt_tbl_hdl) {
+ IPAERR("invalid RT tbl\n");
+ goto error;
+ }
+
+ *rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
+ if (*rt_tbl == NULL) {
+ IPAERR("RT tbl not found\n");
+ goto error;
+ }
+
+ if ((*rt_tbl)->cookie != IPA_COOKIE) {
+ IPAERR("RT table cookie is invalid\n");
+ goto error;
+ }
+ } else {
+ if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
+ IPA_MEM_PART(v4_modem_rt_index_hi) :
+ IPA_MEM_PART(v6_modem_rt_index_hi))) {
+ IPAERR("invalid RT tbl\n");
+ goto error;
+ }
+ }
+ }
+
+ if (rule->rule_id) {
+ if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) {
+			IPAERR("invalid rule_id provided 0x%x\n"
+				"rule_ids without bit 0x%x set are auto generated\n",
+				rule->rule_id, ipahal_get_rule_id_hi_bit());
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ return -EPERM;
+}
+
+static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
+ const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl,
+ struct ipa3_flt_tbl *tbl)
+{
+ int id;
+
+ *entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL);
+ if (!*entry) {
+ IPAERR("failed to alloc FLT rule object\n");
+ goto error;
+ }
+ INIT_LIST_HEAD(&((*entry)->link));
+ (*entry)->rule = *rule;
+ (*entry)->cookie = IPA_COOKIE;
+ (*entry)->rt_tbl = rt_tbl;
+ (*entry)->tbl = tbl;
+ if (rule->rule_id) {
+ id = rule->rule_id;
+ } else {
+ id = ipa3_alloc_rule_id(&tbl->rule_ids);
+ if (id < 0) {
+ IPAERR("failed to allocate rule id\n");
+ WARN_ON(1);
+ goto rule_id_fail;
+ }
+ }
+ (*entry)->rule_id = id;
+
+ return 0;
+
+rule_id_fail:
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry);
+error:
+ return -EPERM;
+}
+
+static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
+ struct ipa3_flt_entry *entry, u32 *rule_hdl)
+{
+ int id;
+
+ tbl->rule_cnt++;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt++;
+ id = ipa3_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+ *rule_hdl = id;
+ entry->id = id;
+ IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+ return 0;
+}
+
+static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
+ const struct ipa_flt_rule *rule, u8 add_rear,
+ u32 *rule_hdl)
+{
+ struct ipa3_flt_entry *entry;
+ struct ipa3_rt_tbl *rt_tbl = NULL;
+
+ if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
+ goto error;
+
+ if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
+ goto error;
+
+ if (add_rear) {
+ if (tbl->sticky_rear)
+ list_add_tail(&entry->link,
+ tbl->head_flt_rule_list.prev);
+ else
+ list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+ } else {
+ list_add(&entry->link, &tbl->head_flt_rule_list);
+ }
+
+ __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+
+ return 0;
+
+error:
+ return -EPERM;
+}
+
+static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
+ const struct ipa_flt_rule *rule,
+ u32 *rule_hdl,
+ enum ipa_ip_type ip,
+ struct ipa3_flt_entry **add_after_entry)
+{
+ struct ipa3_flt_entry *entry;
+ struct ipa3_rt_tbl *rt_tbl = NULL;
+
+ if (!*add_after_entry)
+ goto error;
+
+ if (rule == NULL || rule_hdl == NULL) {
+ IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
+ rule_hdl);
+ goto error;
+ }
+
+ if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
+ goto error;
+
+ if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
+ goto error;
+
+ list_add(&entry->link, &((*add_after_entry)->link));
+
+ __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+
+ /*
+ * prepare for next insertion
+ */
+ *add_after_entry = entry;
+
+ return 0;
+
+error:
+ *add_after_entry = NULL;
+ return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+ struct ipa3_flt_entry *entry;
+ int id;
+
+ entry = ipa3_id_find(rule_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ return -EINVAL;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("bad params\n");
+ return -EINVAL;
+ }
+ id = entry->id;
+
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ IPADBG("del flt rule rule_cnt=%d rule_id=%d\n",
+ entry->tbl->rule_cnt, entry->rule_id);
+ entry->cookie = 0;
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ ipa3_id_remove(id);
+
+ return 0;
+}
+
+static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
+ enum ipa_ip_type ip)
+{
+ struct ipa3_flt_entry *entry;
+ struct ipa3_rt_tbl *rt_tbl = NULL;
+
+ entry = ipa3_id_find(frule->rule_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ goto error;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("bad params\n");
+ goto error;
+ }
+
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+
+ if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
+ if (!frule->rule.eq_attrib_type) {
+ if (!frule->rule.rt_tbl_hdl) {
+ IPAERR("invalid RT tbl\n");
+ goto error;
+ }
+
+ rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
+ if (rt_tbl == NULL) {
+ IPAERR("RT tbl not found\n");
+ goto error;
+ }
+
+ if (rt_tbl->cookie != IPA_COOKIE) {
+ IPAERR("RT table cookie is invalid\n");
+ goto error;
+ }
+ } else {
+ if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
+ IPA_MEM_PART(v4_modem_rt_index_hi) :
+ IPA_MEM_PART(v6_modem_rt_index_hi))) {
+ IPAERR("invalid RT tbl\n");
+ goto error;
+ }
+ }
+ }
+
+ entry->rule = frule->rule;
+ entry->rt_tbl = rt_tbl;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt++;
+ entry->hw_len = 0;
+ entry->prio = 0;
+
+ return 0;
+
+error:
+ return -EPERM;
+}
+
+static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
+{
+ *ipa_ep_idx = ipa3_get_ep_mapping(ep);
+ if (*ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
+ IPAERR("ep not valid ep=%d\n", ep);
+ return -EINVAL;
+ }
+ if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0)
+ IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx);
+
+ if (!ipa_is_ep_support_flt(*ipa_ep_idx)) {
+		IPAERR("ep does not support filtering ep=%d\n", ep);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+ const struct ipa_flt_rule *rule, u8 add_rear,
+ u32 *rule_hdl)
+{
+ struct ipa3_flt_tbl *tbl;
+ int ipa_ep_idx;
+
+ if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
+ IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ rule_hdl, ep);
+
+ return -EINVAL;
+ }
+
+ if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx))
+ return -EINVAL;
+
+ tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
+ IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
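+ *
+ * A minimal caller sketch (illustrative; assuming the
+ * ipa_ioc_add_flt_rule layout from the driver's uapi header, with
+ * error handling elided):
+ *
+ *	struct ipa_ioc_add_flt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(struct ipa_flt_rule_add),
+ *		GFP_KERNEL);
+ *	req->ip = IPA_IP_v4;
+ *	req->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ *	req->num_rules = 1;
+ *	req->rules[0].rule.action = IPA_PASS_TO_EXCEPTION;
+ *	req->rules[0].at_rear = 1;
+ *	req->commit = 1;
+ *	if (ipa3_add_flt_rule(req))
+ *		... inspect req->rules[0].status ...
+ *	kfree(req);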
+ */
+int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+ int i;
+ int result;
+
+ if (rules == NULL || rules->num_rules == 0 ||
+ rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (!rules->global)
+ result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].flt_rule_hdl);
+ else
+ result = -1;
+
+ if (result) {
+ IPAERR("failed to add flt rule %d\n", i);
+ rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->global) {
+ IPAERR("no support for global filter rules\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (rules->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after
+ * the rule whose handle is given, and optionally commit to IPA HW
+ * @rules: [inout] set of filtering rules to add and the handle to add after
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
+{
+ int i;
+ int result;
+ struct ipa3_flt_tbl *tbl;
+ int ipa_ep_idx;
+ struct ipa3_flt_entry *entry;
+
+ if (rules == NULL || rules->num_rules == 0 ||
+ rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ if (rules->ep >= IPA_CLIENT_MAX) {
+ IPAERR("bad parms ep=%d\n", rules->ep);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+
+ if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
+ result = -EINVAL;
+ goto bail;
+ }
+
+ tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];
+
+ entry = ipa3_id_find(rules->add_after_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ result = -EINVAL;
+ goto bail;
+ }
+
+ if (entry->tbl != tbl) {
+ IPAERR("given entry does not match the table\n");
+ result = -EINVAL;
+ goto bail;
+ }
+
+ if (tbl->sticky_rear)
+ if (&entry->link == tbl->head_flt_rule_list.prev) {
+			IPAERR("cannot add rule at end of a sticky table\n");
+ result = -EINVAL;
+ goto bail;
+ }
+
+ IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
+ rules->ip, rules->ep, rules->add_after_hdl);
+
+	/*
+	 * Rules are added one after the other. If one insertion fails it
+	 * cuts the chain: all following rules receive a fail status, as
+	 * the subsequent calls to __ipa_add_flt_rule_after() fail on
+	 * entry == NULL.
+	 */
+
+ for (i = 0; i < rules->num_rules; i++) {
+ result = __ipa_add_flt_rule_after(tbl,
+ &rules->rules[i].rule,
+ &rules->rules[i].flt_rule_hdl,
+ rules->ip,
+ &entry);
+
+ if (result) {
+ IPAERR("failed to add flt rule %d\n", i);
+ rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+ IPAERR("failed to commit flt rules\n");
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of handles of the rules to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+ int i;
+ int result;
+
+ if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del flt rule %i\n", i);
+ hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of rules to modify
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
+{
+ int i;
+ int result;
+
+ if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < hdls->num_rules; i++) {
+ if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
+ IPAERR("failed to mdfy flt rule %i\n", i);
+ hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
+ } else {
+ hdls->rules[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_commit_flt() - Commit the current SW filtering table of specified type
+ * to IPA HW
+ * @ip: [in] the family of filtering tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_commit_flt(enum ipa_ip_type ip)
+{
+ int result;
+
+ if (ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+
+ if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip: [in] the family of filtering tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_reset_flt(enum ipa_ip_type ip)
+{
+ struct ipa3_flt_tbl *tbl;
+ struct ipa3_flt_entry *entry;
+ struct ipa3_flt_entry *next;
+ int i;
+ int id;
+
+ if (ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (!ipa_is_ep_support_flt(i))
+ continue;
+
+ tbl = &ipa3_ctx->flt_tbl[i][ip];
+ list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+ link) {
+ if (ipa3_id_find(entry->id) == NULL) {
+ WARN_ON(1);
+ mutex_unlock(&ipa3_ctx->lock);
+ return -EFAULT;
+ }
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(&entry->tbl->rule_ids,
+ entry->rule_id);
+ entry->cookie = 0;
+ id = entry->id;
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ ipa3_id_remove(id);
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return 0;
+}
+
+void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
+{
+ struct ipa3_flt_tbl *tbl;
+ struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+ struct ipa_flt_rule rule;
+
+ if (!ipa_is_ep_support_flt(ipa_ep_idx)) {
+		IPADBG("cannot add flt rules to non-filtering pipe num %d\n",
+ ipa_ep_idx);
+ return;
+ }
+
+ memset(&rule, 0, sizeof(rule));
+
+ mutex_lock(&ipa3_ctx->lock);
+ tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
+ tbl->sticky_rear = true;
+ rule.action = IPA_PASS_TO_EXCEPTION;
+ __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, false,
+ &ep->dflt_flt4_rule_hdl);
+ ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+
+ tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
+ tbl->sticky_rear = true;
+ rule.action = IPA_PASS_TO_EXCEPTION;
+ __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, false,
+ &ep->dflt_flt6_rule_hdl);
+ ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+ mutex_unlock(&ipa3_ctx->lock);
+}
+
+void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
+{
+ struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ mutex_lock(&ipa3_ctx->lock);
+ if (ep->dflt_flt4_rule_hdl) {
+ __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
+ ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+ ep->dflt_flt4_rule_hdl = 0;
+ }
+ if (ep->dflt_flt6_rule_hdl) {
+ __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
+ ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+ ep->dflt_flt6_rule_hdl = 0;
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+}
+
+/**
+ * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe.
+ * Pipe must be for an AP EP (not modem) and support filtering;
+ * updates the filtering masking values without changing the rt ones.
+ *
+ * @pipe_idx: filter pipe index to configure the tuple masking
+ * @tuple: the tuple members masking
+ * Returns: 0 on success, negative on failure
+ *
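+ * A minimal usage sketch (illustrative; a zeroed tuple is assumed to
+ * exclude all fields from the hash):
+ *
+ *	struct ipahal_reg_hash_tuple tuple;
+ *
+ *	memset(&tuple, 0, sizeof(tuple));
+ *	ipa3_set_flt_tuple_mask(pipe_idx, &tuple);
+ *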
+ */
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
+{
+ struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
+
+ if (!tuple) {
+ IPAERR("bad tuple\n");
+ return -EINVAL;
+ }
+
+ if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+ IPAERR("bad pipe index!\n");
+ return -EINVAL;
+ }
+
+ if (!ipa_is_ep_support_flt(pipe_idx)) {
+ IPAERR("pipe %d not filtering pipe\n", pipe_idx);
+ return -EINVAL;
+ }
+
+ if (ipa_is_modem_pipe(pipe_idx)) {
+ IPAERR("modem pipe tuple is not configured by AP\n");
+ return -EINVAL;
+ }
+
+ ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ pipe_idx, &fltrt_tuple);
+ fltrt_tuple.flt = *tuple;
+ ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ pipe_idx, &fltrt_tuple);
+
+ return 0;
+}
+
+/**
+ * ipa3_flt_read_tbl_from_hw() - Read the filtering table from IPA HW
+ * @pipe_idx: IPA endpoint index
+ * @ip_type: IPv4 or IPv6 table
+ * @hashable: hashable or non-hashable table
+ * @entry: array to fill the table entries
+ * @num_entry: [inout] number of entries in the entry array. Set by the
+ *	caller to indicate the array size; on return, set by this function
+ *	to the number of entries actually filled in
+ *
+ * This function reads the filtering table from IPA SRAM and prepares an array
+ * of entries. This function is mainly used for debugging purposes.
+ *
+ * If empty table or Modem Apps table, zero entries will be returned.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
+ bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
+{
+ void *ipa_sram_mmio;
+ u64 hdr_base_ofst;
+ int tbl_entry_idx;
+ int i;
+ int res = 0;
+ u64 tbl_addr;
+ bool is_sys;
+ u8 *rule_addr;
+ struct ipa_mem_buffer *sys_tbl_mem;
+ int rule_idx;
+
+ IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+ pipe_idx, ip_type, hashable, entry, num_entry);
+
+ if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX ||
+ !entry || !num_entry) {
+ IPAERR("Invalid params\n");
+ return -EFAULT;
+ }
+
+ if (!ipa_is_ep_support_flt(pipe_idx)) {
+ IPAERR("pipe %d does not support filtering\n", pipe_idx);
+ return -EINVAL;
+ }
+
+ /* map IPA SRAM */
+ ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+ ipa3_ctx->smem_restricted_bytes / 4),
+ ipa3_ctx->smem_sz);
+ if (!ipa_sram_mmio) {
+ IPAERR("fail to ioremap IPA SRAM\n");
+ return -ENOMEM;
+ }
+
+ memset(entry, 0, sizeof(*entry) * (*num_entry));
+ if (hashable) {
+ if (ip_type == IPA_IP_v4)
+ hdr_base_ofst =
+ IPA_MEM_PART(v4_flt_hash_ofst);
+ else
+ hdr_base_ofst =
+ IPA_MEM_PART(v6_flt_hash_ofst);
+ } else {
+ if (ip_type == IPA_IP_v4)
+ hdr_base_ofst =
+ IPA_MEM_PART(v4_flt_nhash_ofst);
+ else
+ hdr_base_ofst =
+ IPA_MEM_PART(v6_flt_nhash_ofst);
+ }
+
+ /* calculate the index of the tbl entry */
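+	/*
+	 * Worked example (illustrative): with ep_flt_bitmap = 0x0B
+	 * (pipes 0, 1 and 3 support filtering) and pipe_idx = 3, two
+	 * filtering pipes precede pipe 3, so tbl_entry_idx ends up as
+	 * 1 (bitmap) + 2 = 3.
+	 */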
+ tbl_entry_idx = 1; /* skip the bitmap */
+ for (i = 0; i < pipe_idx; i++)
+ if (ipa3_ctx->ep_flt_bitmap & (1 << i))
+ tbl_entry_idx++;
+
+ IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
+ hdr_base_ofst, tbl_entry_idx);
+
+ res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+ tbl_entry_idx, &tbl_addr, &is_sys);
+ if (res) {
+ IPAERR("failed to read table address from header structure\n");
+ goto bail;
+ }
+ IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
+ pipe_idx, tbl_addr, is_sys);
+ if (!tbl_addr) {
+ IPAERR("invalid flt tbl addr\n");
+ res = -EFAULT;
+ goto bail;
+ }
+
+	/* tables residing in DDR are accessed through virtual memory */
+ if (is_sys) {
+ sys_tbl_mem = &ipa3_ctx->flt_tbl[pipe_idx][ip_type].
+ curr_mem[hashable ? IPA_RULE_HASHABLE :
+ IPA_RULE_NON_HASHABLE];
+ if (sys_tbl_mem->phys_base &&
+ sys_tbl_mem->phys_base != tbl_addr) {
+ IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
+ tbl_addr, &sys_tbl_mem->phys_base);
+ }
+ if (sys_tbl_mem->phys_base)
+ rule_addr = sys_tbl_mem->base;
+ else
+ rule_addr = NULL;
+ } else {
+ rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
+ }
+
+ IPADBG("First rule addr 0x%p\n", rule_addr);
+
+ if (!rule_addr) {
+ /* Modem table in system memory or empty table */
+ *num_entry = 0;
+ goto bail;
+ }
+
+ rule_idx = 0;
+ while (rule_idx < *num_entry) {
+ res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+ if (res) {
+ IPAERR("failed parsing flt rule\n");
+ goto bail;
+ }
+
+ IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+ if (!entry[rule_idx].rule_size)
+ break;
+
+ rule_addr += entry[rule_idx].rule_size;
+ rule_idx++;
+ }
+ *num_entry = rule_idx;
+bail:
+	iounmap(ipa_sram_mmio);
+	return res;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
new file mode 100644
index 0000000..da52b26
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -0,0 +1,1173 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
+static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
+
+#define HDR_TYPE_IS_VALID(type) \
+ ((type) >= 0 && (type) < IPA_HDR_L2_MAX)
+
+#define HDR_PROC_TYPE_IS_VALID(type) \
+ ((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
+
+/**
+ * ipa3_generate_hdr_hw_tbl() - generates the headers table
+ * @mem: [out] buffer to put the header table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+ struct ipa3_hdr_entry *entry;
+
+ mem->size = ipa3_ctx->hdr_tbl.end;
+
+ if (mem->size == 0) {
+ IPAERR("hdr tbl empty\n");
+ return -EPERM;
+ }
+ IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
+
+ mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
+ memset(mem->base, 0, mem->size);
+ list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ if (entry->is_hdr_proc_ctx)
+ continue;
+ IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
+ entry->offset_entry->offset);
+ ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
+ entry->hdr, entry->hdr_len);
+ }
+
+ return 0;
+}
+
+static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
+ u32 hdr_base_addr)
+{
+ struct ipa3_hdr_proc_ctx_entry *entry;
+ int ret;
+
+ list_for_each_entry(entry,
+ &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+ link) {
+ IPADBG_LOW("processing type %d ofst=%d\n",
+ entry->type, entry->offset_entry->offset);
+ ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
+ entry->offset_entry->offset,
+ entry->hdr->hdr_len,
+ entry->hdr->is_hdr_proc_ctx,
+ entry->hdr->phys_base,
+ hdr_base_addr,
+ entry->hdr->offset_entry);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa3_generate_hdr_proc_ctx_hw_tbl() -
+ * generates the headers processing context table.
+ * @hdr_sys_addr: system (DDR) address of the header table, used when the
+ *	header table is not in local memory
+ * @mem: [out] buffer to put the processing context table
+ * @aligned_mem: [out] actual processing context table (with alignment).
+ *	Processing context table needs to be 8 Bytes aligned.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
+ struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
+{
+ u32 hdr_base_addr;
+
+ mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
+
+ /* make sure table is aligned */
+ mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+
+ IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
+
+ mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
+ aligned_mem->phys_base =
+ IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
+ aligned_mem->base = mem->base +
+ (aligned_mem->phys_base - mem->phys_base);
+ aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
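+	/*
+	 * Illustrative example: with an 8-byte alignment requirement and
+	 * mem->phys_base = 0x1004, aligned_mem->phys_base becomes 0x1008
+	 * and aligned_mem->base advances by the same 4-byte delta.
+	 */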
+ memset(aligned_mem->base, 0, aligned_mem->size);
+ hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
+ hdr_sys_addr;
+ return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
+}
+
+/**
+ * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
+ *
+ * Returns: 0 on success, negative on failure
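+ *
+ * Two descriptors are sent (sketch, per the code below):
+ *	desc[0]: DMA_SHARED_MEM to the local header table, or
+ *		HDR_INIT_SYSTEM when the header table resides in DDR
+ *	desc[1]: DMA_SHARED_MEM to the local proc ctx table, or a
+ *		REGISTER_WRITE of IPA_SYS_PKT_PROC_CNTXT_BASE when it
+ *		resides in DDR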
+ */
+int __ipa_commit_hdr_v3_0(void)
+{
+ struct ipa3_desc desc[2];
+ struct ipa_mem_buffer hdr_mem;
+ struct ipa_mem_buffer ctx_mem;
+ struct ipa_mem_buffer aligned_ctx_mem;
+ struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
+ struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
+ struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+ struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
+ struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
+ struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
+ int rc = -EFAULT;
+ u32 proc_ctx_size;
+ u32 proc_ctx_ofst;
+ u32 proc_ctx_size_ddr;
+
+ memset(desc, 0, 2 * sizeof(struct ipa3_desc));
+
+ if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
+ IPAERR("fail to generate HDR HW TBL\n");
+ goto end;
+ }
+
+ if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
+ &aligned_ctx_mem)) {
+ IPAERR("fail to generate HDR PROC CTX HW TBL\n");
+ goto end;
+ }
+
+ if (ipa3_ctx->hdr_tbl_lcl) {
+ if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
+ IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+ IPA_MEM_PART(apps_hdr_size));
+ goto end;
+ } else {
+ dma_cmd_hdr.is_read = false; /* write operation */
+ dma_cmd_hdr.skip_pipeline_clear = false;
+ dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ dma_cmd_hdr.system_addr = hdr_mem.phys_base;
+ dma_cmd_hdr.size = hdr_mem.size;
+ dma_cmd_hdr.local_addr =
+ ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_hdr_ofst);
+ hdr_cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM,
+ &dma_cmd_hdr, false);
+ if (!hdr_cmd_pyld) {
+ IPAERR("fail construct dma_shared_mem cmd\n");
+ goto end;
+ }
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[0].pyld = hdr_cmd_pyld->data;
+ desc[0].len = hdr_cmd_pyld->len;
+ }
+ } else {
+ if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+ IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+ IPA_MEM_PART(apps_hdr_size_ddr));
+ goto end;
+ } else {
+ hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
+ hdr_cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_HDR_INIT_SYSTEM,
+ &hdr_init_cmd, false);
+ if (!hdr_cmd_pyld) {
+ IPAERR("fail construct hdr_init_system cmd\n");
+ goto end;
+ }
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_HDR_INIT_SYSTEM);
+ desc[0].pyld = hdr_cmd_pyld->data;
+ desc[0].len = hdr_cmd_pyld->len;
+ }
+ }
+ desc[0].type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+
+ proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
+ proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
+ if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+ if (aligned_ctx_mem.size > proc_ctx_size) {
+ IPAERR("tbl too big needed %d avail %d\n",
+ aligned_ctx_mem.size,
+ proc_ctx_size);
+ goto end;
+ } else {
+ dma_cmd_ctx.is_read = false; /* Write operation */
+ dma_cmd_ctx.skip_pipeline_clear = false;
+ dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
+ dma_cmd_ctx.size = aligned_ctx_mem.size;
+ dma_cmd_ctx.local_addr =
+ ipa3_ctx->smem_restricted_bytes +
+ proc_ctx_ofst;
+ ctx_cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM,
+ &dma_cmd_ctx, false);
+ if (!ctx_cmd_pyld) {
+ IPAERR("fail construct dma_shared_mem cmd\n");
+ goto end;
+ }
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[1].pyld = ctx_cmd_pyld->data;
+ desc[1].len = ctx_cmd_pyld->len;
+ }
+ } else {
+ proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+ if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
+ IPAERR("tbl too big, needed %d avail %d\n",
+ aligned_ctx_mem.size,
+ proc_ctx_size_ddr);
+ goto end;
+ } else {
+ reg_write_cmd.skip_pipeline_clear = false;
+ reg_write_cmd.pipeline_clear_options =
+ IPAHAL_HPS_CLEAR;
+ reg_write_cmd.offset =
+ ipahal_get_reg_ofst(
+ IPA_SYS_PKT_PROC_CNTXT_BASE);
+ reg_write_cmd.value = aligned_ctx_mem.phys_base;
+ reg_write_cmd.value_mask =
+ ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
+ ctx_cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE,
+				&reg_write_cmd, false);
+ if (!ctx_cmd_pyld) {
+ IPAERR("fail construct register_write cmd\n");
+ goto end;
+ }
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_REGISTER_WRITE);
+ desc[1].pyld = ctx_cmd_pyld->data;
+ desc[1].len = ctx_cmd_pyld->len;
+ }
+ }
+ desc[1].type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
+
+ if (ipa3_send_cmd(2, desc))
+ IPAERR("fail to send immediate command\n");
+ else
+ rc = 0;
+
+ if (ipa3_ctx->hdr_tbl_lcl) {
+ dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
+ hdr_mem.phys_base);
+ } else {
+ if (!rc) {
+ if (ipa3_ctx->hdr_mem.phys_base)
+ dma_free_coherent(ipa3_ctx->pdev,
+ ipa3_ctx->hdr_mem.size,
+ ipa3_ctx->hdr_mem.base,
+ ipa3_ctx->hdr_mem.phys_base);
+ ipa3_ctx->hdr_mem = hdr_mem;
+ }
+ }
+
+ if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+ dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
+ ctx_mem.phys_base);
+ } else {
+ if (!rc) {
+ if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
+ dma_free_coherent(ipa3_ctx->pdev,
+ ipa3_ctx->hdr_proc_ctx_mem.size,
+ ipa3_ctx->hdr_proc_ctx_mem.base,
+ ipa3_ctx->hdr_proc_ctx_mem.phys_base);
+ ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
+ }
+ }
+
+end:
+ if (ctx_cmd_pyld)
+ ipahal_destroy_imm_cmd(ctx_cmd_pyld);
+
+ if (hdr_cmd_pyld)
+ ipahal_destroy_imm_cmd(hdr_cmd_pyld);
+
+ return rc;
+}
+
+static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
+ bool add_ref_hdr)
+{
+ struct ipa3_hdr_entry *hdr_entry;
+ struct ipa3_hdr_proc_ctx_entry *entry;
+ struct ipa3_hdr_proc_ctx_offset_entry *offset;
+ u32 bin;
+ struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+ int id;
+ int needed_len;
+ int mem_size;
+
+ IPADBG_LOW("processing type %d hdr_hdl %d\n",
+ proc_ctx->type, proc_ctx->hdr_hdl);
+
+ if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
+ IPAERR("invalid processing type %d\n", proc_ctx->type);
+ return -EINVAL;
+ }
+
+ hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
+ if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
+ IPAERR("hdr_hdl is invalid\n");
+ return -EINVAL;
+ }
+
+ entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc proc_ctx object\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&entry->link);
+
+ entry->type = proc_ctx->type;
+ entry->hdr = hdr_entry;
+ if (add_ref_hdr)
+ hdr_entry->ref_cnt++;
+ entry->cookie = IPA_COOKIE;
+
+ needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
+
+ if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
+ bin = IPA_HDR_PROC_CTX_BIN0;
+ } else if (needed_len <=
+ ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
+ bin = IPA_HDR_PROC_CTX_BIN1;
+ } else {
+ IPAERR("unexpected needed len %d\n", needed_len);
+ WARN_ON(1);
+ goto bad_len;
+ }
+
+ mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
+ IPA_MEM_PART(apps_hdr_proc_ctx_size) :
+ IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+ if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
+ IPAERR("hdr proc ctx table overflow\n");
+ goto bad_len;
+ }
+
+ if (list_empty(&htbl->head_free_offset_list[bin])) {
+ offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
+ GFP_KERNEL);
+ if (!offset) {
+ IPAERR("failed to alloc offset object\n");
+ goto bad_len;
+ }
+ INIT_LIST_HEAD(&offset->link);
+		/*
+		 * first-time growth of the table: the bin and offset
+		 * assigned here are permanent for this slot
+		 */
+ offset->offset = htbl->end;
+ offset->bin = bin;
+ htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
+ list_add(&offset->link,
+ &htbl->head_offset_list[bin]);
+ } else {
+ /* get the first free slot */
+ offset =
+ list_first_entry(&htbl->head_free_offset_list[bin],
+ struct ipa3_hdr_proc_ctx_offset_entry, link);
+ list_move(&offset->link, &htbl->head_offset_list[bin]);
+ }
+
+ entry->offset_entry = offset;
+ list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
+ htbl->proc_ctx_cnt++;
+ IPADBG_LOW("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
+ htbl->proc_ctx_cnt, offset->offset);
+
+ id = ipa3_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to alloc id\n");
+ WARN_ON(1);
+ }
+ entry->id = id;
+ proc_ctx->proc_ctx_hdl = id;
+ entry->ref_cnt++;
+
+ return 0;
+
+bad_len:
+ if (add_ref_hdr)
+ hdr_entry->ref_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
+ return -EPERM;
+}
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+ struct ipa3_hdr_entry *entry;
+ struct ipa_hdr_offset_entry *offset;
+ u32 bin;
+ struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+ int id;
+ int mem_size;
+
+ if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
+ IPAERR("bad parm\n");
+ goto error;
+ }
+
+ if (!HDR_TYPE_IS_VALID(hdr->type)) {
+ IPAERR("invalid hdr type %d\n", hdr->type);
+ goto error;
+ }
+
+ entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc hdr object\n");
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&entry->link);
+
+ memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+ entry->hdr_len = hdr->hdr_len;
+ strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+ entry->is_partial = hdr->is_partial;
+ entry->type = hdr->type;
+ entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
+ entry->eth2_ofst = hdr->eth2_ofst;
+ entry->cookie = IPA_COOKIE;
+
+ if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+ bin = IPA_HDR_BIN0;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+ bin = IPA_HDR_BIN1;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+ bin = IPA_HDR_BIN2;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+ bin = IPA_HDR_BIN3;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
+ bin = IPA_HDR_BIN4;
+ else {
+ IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ goto bad_hdr_len;
+ }
+
+ mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
+ IPA_MEM_PART(apps_hdr_size_ddr);
+
+	/* if the header does not fit into the table, place it in DDR */
+ if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
+ entry->is_hdr_proc_ctx = true;
+ entry->phys_base = dma_map_single(ipa3_ctx->pdev,
+ entry->hdr,
+ entry->hdr_len,
+ DMA_TO_DEVICE);
+ } else {
+ entry->is_hdr_proc_ctx = false;
+ if (list_empty(&htbl->head_free_offset_list[bin])) {
+ offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
+ GFP_KERNEL);
+ if (!offset) {
+ IPAERR("failed to alloc hdr offset object\n");
+ goto bad_hdr_len;
+ }
+ INIT_LIST_HEAD(&offset->link);
+			/*
+			 * first-time growth of the table: the bin and
+			 * offset assigned here are permanent for this slot
+			 */
+ offset->offset = htbl->end;
+ offset->bin = bin;
+ htbl->end += ipa_hdr_bin_sz[bin];
+ list_add(&offset->link,
+ &htbl->head_offset_list[bin]);
+ } else {
+ /* get the first free slot */
+ offset =
+ list_first_entry(&htbl->head_free_offset_list[bin],
+ struct ipa_hdr_offset_entry, link);
+ list_move(&offset->link, &htbl->head_offset_list[bin]);
+ }
+
+ entry->offset_entry = offset;
+ }
+
+ list_add(&entry->link, &htbl->head_hdr_entry_list);
+ htbl->hdr_cnt++;
+ if (entry->is_hdr_proc_ctx)
+ IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+ hdr->hdr_len,
+ htbl->hdr_cnt,
+ &entry->phys_base);
+ else
+ IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
+ hdr->hdr_len,
+ htbl->hdr_cnt,
+ entry->offset_entry->offset);
+
+ id = ipa3_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to alloc id\n");
+ WARN_ON(1);
+ }
+ entry->id = id;
+ hdr->hdr_hdl = id;
+ entry->ref_cnt++;
+
+ if (entry->is_hdr_proc_ctx) {
+ struct ipa_hdr_proc_ctx_add proc_ctx;
+
+ IPADBG("adding processing context for header %s\n", hdr->name);
+ proc_ctx.type = IPA_HDR_PROC_NONE;
+ proc_ctx.hdr_hdl = id;
+ if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
+ IPAERR("failed to add hdr proc ctx\n");
+ goto fail_add_proc_ctx;
+ }
+ entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
+ }
+
+ return 0;
+
+fail_add_proc_ctx:
+ entry->ref_cnt--;
+ hdr->hdr_hdl = 0;
+ ipa3_id_remove(id);
+ htbl->hdr_cnt--;
+ list_del(&entry->link);
+ dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+bad_hdr_len:
+ entry->cookie = 0;
+ kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+error:
+ return -EPERM;
+}
+
+static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
+{
+ struct ipa3_hdr_proc_ctx_entry *entry;
+ struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+
+ entry = ipa3_id_find(proc_ctx_hdl);
+ if (!entry || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ IPADBG("del ctx proc cnt=%d ofst=%d\n",
+ htbl->proc_ctx_cnt, entry->offset_entry->offset);
+
+ if (--entry->ref_cnt) {
+ IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
+ proc_ctx_hdl, entry->ref_cnt);
+ return 0;
+ }
+
+ if (release_hdr)
+ __ipa3_del_hdr(entry->hdr->id);
+
+ /* move the offset entry to appropriate free list */
+ list_move(&entry->offset_entry->link,
+ &htbl->head_free_offset_list[entry->offset_entry->bin]);
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
+
+ /* remove the handle from the database */
+ ipa3_id_remove(proc_ctx_hdl);
+
+ return 0;
+}
+
+
+int __ipa3_del_hdr(u32 hdr_hdl)
+{
+ struct ipa3_hdr_entry *entry;
+ struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+
+ entry = ipa3_id_find(hdr_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ return -EINVAL;
+ }
+
+	if (entry->cookie != IPA_COOKIE) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ if (entry->is_hdr_proc_ctx)
+ IPADBG("del hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+ entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
+ else
+ IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
+ htbl->hdr_cnt, entry->offset_entry->offset);
+
+ if (--entry->ref_cnt) {
+ IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
+ return 0;
+ }
+
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa3_ctx->pdev,
+ entry->phys_base,
+ entry->hdr_len,
+ DMA_TO_DEVICE);
+ __ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false);
+ } else {
+ /* move the offset entry to appropriate free list */
+ list_move(&entry->offset_entry->link,
+ &htbl->head_free_offset_list[entry->offset_entry->bin]);
+ }
+ list_del(&entry->link);
+ htbl->hdr_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+
+ /* remove the handle from the database */
+ ipa3_id_remove(hdr_hdl);
+
+ return 0;
+}
+
+/**
+ * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
+ * to IPA HW
+ * @hdrs: [inout] set of headers to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (hdrs == NULL || hdrs->num_hdrs == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ IPADBG("adding %d headers to IPA driver internal data struct\n",
+ hdrs->num_hdrs);
+ for (i = 0; i < hdrs->num_hdrs; i++) {
+ if (__ipa_add_hdr(&hdrs->hdr[i])) {
+ IPAERR("failed to add hdr %d\n", i);
+ hdrs->hdr[i].status = -1;
+ } else {
+ hdrs->hdr[i].status = 0;
+ }
+ }
+
+ if (hdrs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+ if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+}
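+
+/*
+ * Illustrative sketch (not part of the driver): a minimal caller of
+ * ipa3_add_hdr(). The header name and payload are hypothetical and the
+ * block is compiled out so this file's behavior is unchanged.
+ */
+#if 0
+static int ipa3_add_hdr_example(void)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	u8 eth2[14] = { 0 };	/* Ethernet-II template, 14 bytes */
+	int ret;
+
+	/* one ipa_hdr_add entry trails the ipa_ioc_add_hdr descriptor */
+	hdrs = kzalloc(sizeof(*hdrs) + sizeof(struct ipa_hdr_add),
+		GFP_KERNEL);
+	if (!hdrs)
+		return -ENOMEM;
+
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 1;
+	strlcpy(hdrs->hdr[0].name, "example_eth2", IPA_RESOURCE_NAME_MAX);
+	memcpy(hdrs->hdr[0].hdr, eth2, sizeof(eth2));
+	hdrs->hdr[0].hdr_len = sizeof(eth2);
+	hdrs->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
+
+	ret = ipa3_add_hdr(hdrs);	/* handle returned in hdr[0].hdr_hdl */
+	kfree(hdrs);
+	return ret;
+}
+#endif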
+
+/**
+ * ipa3_del_hdr() - Remove the specified headers from SW and optionally commit
+ * them to IPA HW
+ * @hdls: [inout] set of headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (hdls == NULL || hdls->num_hdls == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa3_del_hdr(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del hdr %i\n", i);
+ hdls->hdl[i].status = -1;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit) {
+ if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @proc_ctxs: [inout] set of processing context headers to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+	IPADBG("adding %d header processing contexts to IPA driver\n",
+ proc_ctxs->num_proc_ctxs);
+ for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
+ if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
+			IPAERR("failed to add hdr proc ctx %d\n", i);
+ proc_ctxs->proc_ctx[i].status = -1;
+ } else {
+ proc_ctxs->proc_ctx[i].status = 0;
+ }
+ }
+
+ if (proc_ctxs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+ if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa3_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls: [inout] set of processing context headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+ int i;
+ int result;
+
+ if (hdls == NULL || hdls->num_hdls == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true)) {
+ IPAERR("failed to del hdr %i\n", i);
+ hdls->hdl[i].status = -1;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit) {
+ if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_commit_hdr(void)
+{
+ int result = -EFAULT;
+
+ /*
+ * issue a commit on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa3_commit_rt(IPA_IP_v4))
+ return -EPERM;
+ if (ipa3_commit_rt(IPA_IP_v6))
+ return -EPERM;
+
+ mutex_lock(&ipa3_ctx->lock);
+ if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_reset_hdr(void)
+{
+ struct ipa3_hdr_entry *entry;
+ struct ipa3_hdr_entry *next;
+ struct ipa3_hdr_proc_ctx_entry *ctx_entry;
+ struct ipa3_hdr_proc_ctx_entry *ctx_next;
+ struct ipa_hdr_offset_entry *off_entry;
+ struct ipa_hdr_offset_entry *off_next;
+ struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
+ struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
+ int i;
+
+ /*
+ * issue a reset on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa3_reset_rt(IPA_IP_v4))
+ IPAERR("fail to reset v4 rt\n");
+ if (ipa3_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v6 rt\n");
+
+ mutex_lock(&ipa3_ctx->lock);
+ IPADBG("reset hdr\n");
+ list_for_each_entry_safe(entry, next,
+ &ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+ /* do not remove the default header */
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ if (entry->is_hdr_proc_ctx) {
+ IPAERR("default header is proc ctx\n");
+ mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ continue;
+ }
+
+ if (ipa3_id_find(entry->id) == NULL) {
+ mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa3_ctx->pdev,
+ entry->phys_base,
+ entry->hdr_len,
+ DMA_TO_DEVICE);
+ entry->proc_ctx = NULL;
+ }
+ list_del(&entry->link);
+ entry->ref_cnt = 0;
+ entry->cookie = 0;
+
+ /* remove the handle from the database */
+ ipa3_id_remove(entry->id);
+ kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+
+ }
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ list_for_each_entry_safe(off_entry, off_next,
+ &ipa3_ctx->hdr_tbl.head_offset_list[i],
+ link) {
+
+ /*
+ * do not remove the default exception header which is
+ * at offset 0
+ */
+ if (off_entry->offset == 0)
+ continue;
+
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
+ }
+ list_for_each_entry_safe(off_entry, off_next,
+ &ipa3_ctx->hdr_tbl.head_free_offset_list[i],
+ link) {
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
+ }
+ }
+ /* there is one header of size 8 */
+ ipa3_ctx->hdr_tbl.end = 8;
+ ipa3_ctx->hdr_tbl.hdr_cnt = 1;
+
+ IPADBG("reset hdr proc ctx\n");
+ list_for_each_entry_safe(
+ ctx_entry,
+ ctx_next,
+ &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+ link) {
+
+ if (ipa3_id_find(ctx_entry->id) == NULL) {
+ mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ list_del(&ctx_entry->link);
+ ctx_entry->ref_cnt = 0;
+ ctx_entry->cookie = 0;
+
+ /* remove the handle from the database */
+ ipa3_id_remove(ctx_entry->id);
+ kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);
+
+ }
+ for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+ list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+ &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
+ link) {
+
+ list_del(&ctx_off_entry->link);
+ kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
+ ctx_off_entry);
+ }
+ list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+ &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
+ link) {
+ list_del(&ctx_off_entry->link);
+ kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
+ ctx_off_entry);
+ }
+ }
+ ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
+ ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return 0;
+}
+
+static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
+{
+ struct ipa3_hdr_entry *entry;
+
+ if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Header name too long: %.*s\n",
+			IPA_RESOURCE_NAME_MAX, name);
+ return NULL;
+ }
+
+ list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ if (!strcmp(name, entry->name))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * ipa3_get_hdr() - Lookup the specified header resource
+ * @lookup: [inout] header to lookup and its handle
+ *
+ * lookup the specified header resource and return handle if it exists
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa3_put_hdr later if this function succeeds
+ */
+int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+ struct ipa3_hdr_entry *entry;
+ int result = -1;
+
+ if (lookup == NULL) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa3_ctx->lock);
+ entry = __ipa_find_hdr(lookup->name);
+ if (entry) {
+ lookup->hdl = entry->id;
+ result = 0;
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * __ipa3_release_hdr() - drop reference to header and cause
+ * deletion if reference count permits
+ * @hdr_hdl: [in] handle of header to be released
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int __ipa3_release_hdr(u32 hdr_hdl)
+{
+ int result = 0;
+
+ if (__ipa3_del_hdr(hdr_hdl)) {
+ IPADBG("fail to del hdr %x\n", hdr_hdl);
+ result = -EFAULT;
+ goto bail;
+ }
+
+ /* commit for put */
+ if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+ IPAERR("fail to commit hdr\n");
+ result = -EFAULT;
+ goto bail;
+ }
+
+bail:
+ return result;
+}
+
+/**
+ * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
+ * and cause deletion if reference count permits
+ * @proc_ctx_hdl: [in] handle of processing context to be released
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
+{
+ int result = 0;
+
+ if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true)) {
+		IPADBG("fail to del hdr proc ctx %x\n", proc_ctx_hdl);
+ result = -EFAULT;
+ goto bail;
+ }
+
+ /* commit for put */
+ if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+ IPAERR("fail to commit hdr\n");
+ result = -EFAULT;
+ goto bail;
+ }
+
+bail:
+ return result;
+}
+
+/**
+ * ipa3_put_hdr() - Release the specified header handle
+ * @hdr_hdl: [in] the header handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_put_hdr(u32 hdr_hdl)
+{
+ struct ipa3_hdr_entry *entry;
+ int result = -EFAULT;
+
+ mutex_lock(&ipa3_ctx->lock);
+
+ entry = ipa3_id_find(hdr_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ result = -EINVAL;
+ goto bail;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("invalid header entry\n");
+ result = -EINVAL;
+ goto bail;
+ }
+
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+}
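+
+/*
+ * Illustrative sketch (not part of the driver): the get/put pairing the
+ * kernel-doc above calls for. The header name is hypothetical and the
+ * block is compiled out.
+ */
+#if 0
+static int ipa3_get_put_example(void)
+{
+	struct ipa_ioc_get_hdr lookup;
+
+	memset(&lookup, 0, sizeof(lookup));
+	strlcpy(lookup.name, "example_eth2", IPA_RESOURCE_NAME_MAX);
+	if (ipa3_get_hdr(&lookup))
+		return -EFAULT;
+
+	/* ... use lookup.hdl, e.g. when building a routing rule ... */
+
+	return ipa3_put_hdr(lookup.hdl);
+}
+#endif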
+
+/**
+ * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
+ * it
+ * @copy: [inout] header to lookup and its copy
+ *
+ * lookup the specified header resource and return a copy of it (along with
+ * its attributes) if it exists; this is typically called for partial headers
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+ struct ipa3_hdr_entry *entry;
+ int result = -EFAULT;
+
+ if (copy == NULL) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa3_ctx->lock);
+ entry = __ipa_find_hdr(copy->name);
+ if (entry) {
+ memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+ copy->hdr_len = entry->hdr_len;
+ copy->type = entry->type;
+ copy->is_partial = entry->is_partial;
+ copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
+ copy->eth2_ofst = entry->eth2_ofst;
+ result = 0;
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
new file mode 100644
index 0000000..dff3a3f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW related data types */
+
+
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP BIT(7)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT BIT(6)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT BIT(5)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL BIT(2)
+
+/**
+ * struct ipa3_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags
+ * @metadata: metadata
+ *
+ * A5 MUX header is in BE, A5 runs in LE. This struct definition
+ * allows A5 SW to correctly parse the header
+ */
+struct ipa3_a5_mux_hdr {
+ u16 interface_id;
+ u8 src_pipe_index;
+ u8 flags;
+ u32 metadata;
+};
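+
+/*
+ * Illustrative sketch (not part of the driver, compiled out): since the
+ * header arrives big-endian and the A5 is little-endian, multi-byte
+ * fields must be byte-swapped before use.
+ */
+#if 0
+static inline void ipa3_parse_a5_mux_hdr(const struct ipa3_a5_mux_hdr *hdr)
+{
+	u16 if_id = ntohs(hdr->interface_id);	/* BE -> host order */
+	u32 metadata = ntohl(hdr->metadata);
+
+	pr_debug("if_id=%u pipe=%u flags=0x%x meta=0x%x\n",
+		if_id, hdr->src_pipe_index, hdr->flags, metadata);
+}
+#endif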
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
new file mode 100644
index 0000000..4cb4d5a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -0,0 +1,2022 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA3_I_H_
+#define _IPA3_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/ipa.h>
+#include <linux/ipa_usb.h>
+#include <linux/msm-sps.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include "ipa_hw_defs.h"
+#include "ipa_qmi_service.h"
+#include "../ipa_api.h"
+#include "ipahal/ipahal_reg.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
+
+#define DRV_NAME "ipa"
+#define NAT_DEV_NAME "ipaNatTable"
+#define IPA_COOKIE 0x57831603
+#define MTU_BYTE 1500
+
+#define IPA3_MAX_NUM_PIPES 31
+#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
+#define IPA_SYS_DESC_FIFO_SZ 0x800
+#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_LAN_RX_HEADER_LENGTH (2)
+#define IPA_QMAP_HEADER_LENGTH (4)
+#define IPA_DL_CHECKSUM_LENGTH (8)
+#define IPA_NUM_DESC_PER_SW_TX (3)
+#define IPA_GENERIC_RX_POOL_SZ 192
+#define IPA_UC_FINISH_MAX 6
+#define IPA_UC_WAIT_MIN_SLEEP 1000
+#define IPA_UC_WAII_MAX_SLEEP 1200
+
+#define IPA_MAX_STATUS_STAT_NUM 30
+
+#define IPA_IPC_LOG_PAGES 50
+
+#define IPADBG(fmt, args...) \
+ do { \
+ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+ if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, __func__, \
+				__LINE__, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, __func__, \
+				__LINE__, ## args); \
+ } \
+ } while (0)
+
+#define IPADBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+ if (ipa3_ctx) \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, __func__, \
+				__LINE__, ## args); \
+ } while (0)
+
+#define IPAERR(fmt, args...) \
+ do { \
+ pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+ if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, __func__, \
+				__LINE__, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, __func__, \
+				__LINE__, ## args); \
+ } \
+ } while (0)
+
+#define WLAN_AMPDU_TX_EP 15
+#define WLAN_PROD_TX_EP 19
+#define WLAN1_CONS_RX_EP 14
+#define WLAN2_CONS_RX_EP 16
+#define WLAN3_CONS_RX_EP 17
+#define WLAN4_CONS_RX_EP 18
+
+#define IPA_RAM_NAT_OFST 0
+#define IPA_RAM_NAT_SIZE 0
+#define IPA_MEM_CANARY_VAL 0xdeadbeef
+
+#define IPA_STATS
+
+#ifdef IPA_STATS
+#define IPA_STATS_INC_CNT(val) (++val)
+#define IPA_STATS_DEC_CNT(val) (--val)
+#define IPA_STATS_EXCP_CNT(__excp, __base) do { \
+ if (__excp < 0 || __excp >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) \
+ break; \
+ ++__base[__excp]; \
+ } while (0)
+#else
+#define IPA_STATS_INC_CNT(x) do { } while (0)
+#define IPA_STATS_DEC_CNT(x) do { } while (0)
+#define IPA_STATS_EXCP_CNT(__excp, __base) do { } while (0)
+#endif
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN4 4
+#define IPA_HDR_BIN_MAX 5
+
+#define IPA_HDR_PROC_CTX_BIN0 0
+#define IPA_HDR_PROC_CTX_BIN1 1
+#define IPA_HDR_PROC_CTX_BIN_MAX 2
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+/*
+ * Due to a ZLT issue with the USB 3.0 core, the IPA BAM threshold needs
+ * to be set to max packet size + 1. After setting the threshold, the USB
+ * core will not be notified on ZLTs
+ */
+#define IPA_USB_EVENT_THRESHOLD 0x4001
+
+#define IPA_RX_POOL_CEIL 32
+#define IPA_RX_SKB_SIZE 1792
+
+#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr"
+#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
+#define IPA_INVALID_L4_PROTOCOL 0xFF
+
+#define IPA_CLIENT_IS_PROD(x) ((x) >= IPA_CLIENT_PROD && (x) < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) ((x) >= IPA_CLIENT_CONS && (x) < IPA_CLIENT_MAX)
+
+#define IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst) \
+ (((start_ofst) + 127) & ~127)
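+/* e.g. rounds any offset in 1..128 up to 128: (1 + 127) & ~127 == 128 */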
+
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
+ (((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \
+ ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1))
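+/* e.g. IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(13) == 16, while (8) stays 8 */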
+
+#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
+#define IPA_MEM_PART(x_) (ipa3_ctx->ctrl->mem_partition.x_)
+
+#define IPA_GSI_CHANNEL_STOP_MAX_RETRY 10
+#define IPA_GSI_CHANNEL_STOP_PKT_SIZE 1
+
+#define IPA_GSI_CHANNEL_EMPTY_MAX_RETRY 15
+#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC (1000)
+#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC (2000)
+
+#define IPA_SLEEP_CLK_RATE_KHZ (32)
+
+#define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
+#define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96
+#define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
+#define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40
+
+struct ipa3_active_client_htable_entry {
+ struct hlist_node list;
+ char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+ int count;
+ enum ipa_active_client_log_type type;
+};
+
+struct ipa3_active_clients_log_ctx {
+ char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
+ int log_head;
+ int log_tail;
+ bool log_rdy;
+ struct hlist_head htable[IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
+};
+
+struct ipa3_client_names {
+ enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS];
+ int length;
+};
+
+struct ipa_smmu_cb_ctx {
+ bool valid;
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+ struct iommu_domain *iommu;
+ unsigned long next_addr;
+ u32 va_start;
+ u32 va_size;
+ u32 va_end;
+};
+
+/**
+ * struct ipa3_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ * @id: rule handle - globally unique
+ * @prio: rule 10bit priority which defines the order of the rule
+ * among other rules in the same integrated table
+ * @rule_id: rule 10bit ID to be returned in packet status
+ */
+struct ipa3_flt_entry {
+ struct list_head link;
+ struct ipa_flt_rule rule;
+ u32 cookie;
+ struct ipa3_flt_tbl *tbl;
+ struct ipa3_rt_tbl *rt_tbl;
+ u32 hw_len;
+ int id;
+ u16 prio;
+ u16 rule_id;
+};
+
+/**
+ * struct ipa3_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ * @id: routing table id
+ * @rule_ids: idr structure that holds the rule_id for each rule
+ */
+struct ipa3_rt_tbl {
+ struct list_head link;
+ struct list_head head_rt_rule_list;
+ char name[IPA_RESOURCE_NAME_MAX];
+ u32 idx;
+ u32 rule_cnt;
+ u32 ref_cnt;
+ struct ipa3_rt_tbl_set *set;
+ u32 cookie;
+ bool in_sys[IPA_RULE_TYPE_MAX];
+ u32 sz[IPA_RULE_TYPE_MAX];
+ struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+ struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+ int id;
+ struct idr rule_ids;
+};
+
+/**
+ * struct ipa3_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @type: l2 header type
+ * @is_partial: flag indicating if header table entry is partial
+ * @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
+ * true - hdr entry resides in DDR and pointed to by proc ctx
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
+ * else 0
+ * @proc_ctx: processing context header
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of the header entry
+ * @id: header entry id
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa3_hdr_entry {
+ struct list_head link;
+ u8 hdr[IPA_HDR_MAX_SIZE];
+ u32 hdr_len;
+ char name[IPA_RESOURCE_NAME_MAX];
+ enum ipa_hdr_l2_type type;
+ u8 is_partial;
+ bool is_hdr_proc_ctx;
+ dma_addr_t phys_base;
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+ struct ipa_hdr_offset_entry *offset_entry;
+ u32 cookie;
+ u32 ref_cnt;
+ int id;
+ u8 is_eth2_ofst_valid;
+ u16 eth2_ofst;
+};
+
+/**
+ * struct ipa3_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the current end of the table (offset of the next free byte)
+ */
+struct ipa3_hdr_tbl {
+ struct list_head head_hdr_entry_list;
+ struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+ struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+ u32 hdr_cnt;
+ u32 end;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_offset_entry - IPA proc ctx header offset entry
+ * @link: entry's link in global processing context header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa3_hdr_proc_ctx_offset_entry {
+ struct list_head link;
+ u32 offset;
+ u32 bin;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_entry - IPA processing context header table entry
+ * @link: entry's link in global header table entries list
+ * @type: processing context type
+ * @offset_entry: entry's offset
+ * @hdr: the header
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of the processing context entry
+ * @id: processing context header entry id
+ */
+struct ipa3_hdr_proc_ctx_entry {
+ struct list_head link;
+ enum ipa_hdr_proc_type type;
+ struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
+ struct ipa3_hdr_entry *hdr;
+ u32 cookie;
+ u32 ref_cnt;
+ int id;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_tbl - IPA processing context header table
+ * @head_proc_ctx_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @proc_ctx_cnt: number of processing context headers
+ * @end: the current end of the table (offset of the next free byte)
+ * @start_offset: offset in words of processing context header table
+ */
+struct ipa3_hdr_proc_ctx_tbl {
+ struct list_head head_proc_ctx_entry_list;
+ struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+ struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+ u32 proc_ctx_cnt;
+ u32 end;
+ u32 start_offset;
+};
+
+/**
+ * struct ipa3_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter tables
+ * @sticky_rear: flag indicating if the last rule sticks to the table's rear
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ * @rule_ids: idr structure that holds the rule_id for each rule
+ */
+struct ipa3_flt_tbl {
+ struct list_head head_flt_rule_list;
+ u32 rule_cnt;
+ bool in_sys[IPA_RULE_TYPE_MAX];
+ u32 sz[IPA_RULE_TYPE_MAX];
+ struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+ struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+ bool sticky_rear;
+ struct idr rule_ids;
+};
+
+/**
+ * struct ipa3_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table
+ * @proc_ctx: processing context table
+ * @hw_len: the length of the table
+ * @id: rule handle - globally unique
+ * @prio: rule 10bit priority which defines the order of the rule
+ * among other rules in the same integrated table
+ * @rule_id: rule 10bit ID to be returned in packet status
+ */
+struct ipa3_rt_entry {
+ struct list_head link;
+ struct ipa_rt_rule rule;
+ u32 cookie;
+ struct ipa3_rt_tbl *tbl;
+ struct ipa3_hdr_entry *hdr;
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+ u32 hw_len;
+ int id;
+ u16 prio;
+ u16 rule_id;
+};
+
+/**
+ * struct ipa3_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa3_rt_tbl_set {
+ struct list_head head_rt_tbl_list;
+ u32 tbl_cnt;
+};
+
+/**
+ * struct ipa3_wlan_stats - Wlan stats for each wlan endpoint
+ * @rx_pkts_rcvd: Packets sent by wlan driver
+ * @rx_pkts_status_rcvd: Status packets received from ipa hw
+ * @rx_hd_processed: Data Descriptors processed by IPA Driver
+ * @rx_hd_reply: Data Descriptors recycled by wlan driver
+ * @rx_hd_rcvd: Data Descriptors sent by wlan driver
+ * @rx_pkt_leak: Count of packets that were not recycled
+ * @rx_dp_fail: Packets that failed to transfer to IPA HW
+ * @tx_pkts_rcvd: SKB Buffers received from ipa hw
+ * @tx_pkts_sent: SKB Buffers sent to wlan driver
+ * @tx_pkts_dropped: Dropped packets count
+ */
+struct ipa3_wlan_stats {
+ u32 rx_pkts_rcvd;
+ u32 rx_pkts_status_rcvd;
+ u32 rx_hd_processed;
+ u32 rx_hd_reply;
+ u32 rx_hd_rcvd;
+ u32 rx_pkt_leak;
+ u32 rx_dp_fail;
+ u32 tx_pkts_rcvd;
+ u32 tx_pkts_sent;
+ u32 tx_pkts_dropped;
+};
+
+/**
+ * struct ipa3_wlan_comm_memb - Wlan comm members
+ * @wlan_spinlock: protects wlan comm buff list and its size
+ * @ipa_tx_mul_spinlock: protects tx dp mul transfer
+ * @wlan_comm_total_cnt: wlan common skb buffers allocated count
+ * @wlan_comm_free_cnt: wlan common skb buffer free count
+ * @total_tx_pkts_freed: Recycled Buffer count
+ * @wlan_comm_desc_list: wlan common skb buffer list
+ */
+struct ipa3_wlan_comm_memb {
+ spinlock_t wlan_spinlock;
+ spinlock_t ipa_tx_mul_spinlock;
+ u32 wlan_comm_total_cnt;
+ u32 wlan_comm_free_cnt;
+ u32 total_tx_pkts_freed;
+ struct list_head wlan_comm_desc_list;
+ atomic_t active_clnt_cnt;
+};
+
+struct ipa_gsi_ep_mem_info {
+ u16 evt_ring_len;
+ u64 evt_ring_base_addr;
+ void *evt_ring_base_vaddr;
+ u16 chan_ring_len;
+ u64 chan_ring_base_addr;
+ void *chan_ring_base_vaddr;
+};
+
+struct ipa3_status_stats {
+ struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
+ int curr;
+};
+
+/**
+ * struct ipa3_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @gsi_chan_hdl: EP's GSI channel handle
+ * @gsi_evt_ring_hdl: EP's GSI channel event ring handle
+ * @gsi_mem_info: EP's GSI channel rings info
+ * @chan_scratch: EP's GSI channel scratch info
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information which will be forwarded once the user is
+ * notified for new data avail
+ * @client_notify: user provided CB for EP events notification, the event is
+ * data received.
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ * by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @disconnect_in_progress: Indicates client disconnect in progress.
+ * @qmi_request_sent: Indicates whether QMI request to enable clear data path
+ * request is sent or not.
+ * @napi_enabled: when true, IPA calls the client callback to start polling
+ */
+struct ipa3_ep_context {
+ int valid;
+ enum ipa_client_type client;
+ struct sps_pipe *ep_hdl;
+ unsigned long gsi_chan_hdl;
+ unsigned long gsi_evt_ring_hdl;
+ struct ipa_gsi_ep_mem_info gsi_mem_info;
+ union __packed gsi_channel_scratch chan_scratch;
+ bool bytes_xfered_valid;
+ u16 bytes_xfered;
+ dma_addr_t phys_base;
+ struct ipa_ep_cfg cfg;
+ struct ipa_ep_cfg_holb holb;
+ struct ipahal_reg_ep_cfg_status status;
+ u32 dst_pipe_index;
+ u32 rt_tbl_idx;
+ struct sps_connect connect;
+ void *priv;
+ void (*client_notify)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+ bool desc_fifo_in_pipe_mem;
+ bool data_fifo_in_pipe_mem;
+ u32 desc_fifo_pipe_mem_ofst;
+ u32 data_fifo_pipe_mem_ofst;
+ bool desc_fifo_client_allocated;
+ bool data_fifo_client_allocated;
+ atomic_t avail_fifo_desc;
+ u32 dflt_flt4_rule_hdl;
+ u32 dflt_flt6_rule_hdl;
+ bool skip_ep_cfg;
+ bool keep_ipa_awake;
+ struct ipa3_wlan_stats wstats;
+ u32 uc_offload_state;
+ bool disconnect_in_progress;
+ u32 qmi_request_sent;
+ bool napi_enabled;
+ bool switch_to_intr;
+ int inactive_cycles;
+ u32 eot_in_poll_err;
+
+ /* sys MUST be the last element of this struct */
+ struct ipa3_sys_context *sys;
+};
+
+/**
+ * struct ipa_request_gsi_channel_params - xDCI channel related properties
+ *
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: type of "client"
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie; evt - type of event; data - data relevant
+ * to event (may not be valid). See event_type enum for valid
+ * cases.
+ * @skip_ep_cfg: boolean field that determines if EP should be
+ * configured by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @evt_ring_params: parameters for the channel's event ring
+ * @evt_scratch: parameters for the channel's event ring scratch
+ * @chan_params: parameters for the channel
+ * @chan_scratch: parameters for the channel's scratch
+ *
+ */
+struct ipa_request_gsi_channel_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ void *priv;
+ ipa_notify_cb notify;
+ bool skip_ep_cfg;
+ bool keep_ipa_awake;
+ struct gsi_evt_ring_props evt_ring_params;
+ union __packed gsi_evt_scratch evt_scratch;
+ struct gsi_chan_props chan_params;
+ union __packed gsi_channel_scratch chan_scratch;
+};
+
+enum ipa3_sys_pipe_policy {
+ IPA_POLICY_INTR_MODE,
+ IPA_POLICY_NOINTR_MODE,
+ IPA_POLICY_INTR_POLL_MODE,
+};
+
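+/**
+ * struct ipa3_repl_ctx - ring of pre-allocated Rx packet wrappers, refilled
+ * by the repl work and drained when Rx buffers are replenished
+ * @cache: array of wrapper pointers, @capacity entries long
+ * @head_idx: ring head index
+ * @tail_idx: ring tail index
+ * @capacity: number of entries in @cache
+ */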
+struct ipa3_repl_ctx {
+ struct ipa3_rx_pkt_wrapper **cache;
+ atomic_t head_idx;
+ atomic_t tail_idx;
+ u32 capacity;
+};
+
+/**
+ * struct ipa3_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ *
+ * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN
+ */
+struct ipa3_sys_context {
+ u32 len;
+ struct sps_register_event event;
+ atomic_t curr_polling_state;
+ struct delayed_work switch_to_intr_work;
+ enum ipa3_sys_pipe_policy policy;
+ int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
+ struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
+ void (*free_skb)(struct sk_buff *skb);
+	void (*free_rx_wrapper)(struct ipa3_rx_pkt_wrapper *rx_pkt);
+ u32 rx_buff_sz;
+ u32 rx_pool_sz;
+ struct sk_buff *prev_skb;
+ unsigned int len_rem;
+ unsigned int len_pad;
+ unsigned int len_partial;
+ bool drop_packet;
+ struct work_struct work;
+ void (*sps_callback)(struct sps_event_notify *notify);
+ enum sps_option sps_option;
+ struct delayed_work replenish_rx_work;
+ struct work_struct repl_work;
+ void (*repl_hdlr)(struct ipa3_sys_context *sys);
+ struct ipa3_repl_ctx repl;
+
+ /* ordering is important - mutable fields go above */
+ struct ipa3_ep_context *ep;
+ struct list_head head_desc_list;
+ struct list_head rcycl_list;
+ spinlock_t spinlock;
+ struct workqueue_struct *wq;
+ struct workqueue_struct *repl_wq;
+ struct ipa3_status_stats *status_stat;
+ /* ordering is important - other immutable fields go below */
+};
+
+/**
+ * enum ipa3_desc_type - IPA descriptors type
+ *
+ * IPA descriptors type, IPA supports DD and ICD but no CD
+ */
+enum ipa3_desc_type {
+ IPA_DATA_DESC,
+ IPA_DATA_DESC_SKB,
+ IPA_DATA_DESC_SKB_PAGED,
+ IPA_IMM_CMD_DESC,
+};
+
+/**
+ * struct ipa3_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: specify if this packet is for the skb or immediate command
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ * >1 and <0xFFFF for first of a "multiple" transfer,
+ *	0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ * @no_unmap_dma: in case this is true, the buffer will not be dma unmapped
+ *
+ * This struct can wrap both data packet and immediate command packet.
+ */
+struct ipa3_tx_pkt_wrapper {
+ enum ipa3_desc_type type;
+ struct ipa_mem_buffer mem;
+ struct work_struct work;
+ struct list_head link;
+ void (*callback)(void *user1, int user2);
+ void *user1;
+ int user2;
+ struct ipa3_sys_context *sys;
+ struct ipa_mem_buffer mult;
+ u32 cnt;
+ void *bounce;
+ bool no_unmap_dma;
+};
+
+/**
+ * struct ipa3_dma_xfer_wrapper - IPADMA transfer descr wrapper
+ * @phys_addr_src: physical address of the source data to copy
+ * @phys_addr_dest: physical address to store the copied data
+ * @len: len in bytes to copy
+ * @link: linked to the wrappers list on the proper (sync/async) cons pipe
+ * @xfer_done: completion object for sync_memcpy completion
+ * @callback: IPADMA client provided completion callback
+ * @user1: cookie1 for above callback
+ *
+ * This struct can wrap both sync and async memcpy transfers descriptors.
+ */
+struct ipa3_dma_xfer_wrapper {
+ u64 phys_addr_src;
+ u64 phys_addr_dest;
+ u16 len;
+ struct list_head link;
+ struct completion xfer_done;
+ void (*callback)(void *user1);
+ void *user1;
+};
+
+/**
+ * struct ipa3_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb or kmalloc'ed immediate command
+ * parameters/plain old data
+ * @frag: points to paged fragment
+ * @dma_address: dma mapped address of pyld
+ * @dma_address_valid: valid field for dma_address
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa3_desc {
+ enum ipa3_desc_type type;
+ void *pyld;
+ skb_frag_t *frag;
+ dma_addr_t dma_address;
+ bool dma_address_valid;
+ u16 len;
+ u16 opcode;
+ void (*callback)(void *user1, int user2);
+ void *user1;
+ int user2;
+ struct completion xfer_done;
+};
+
+/**
+ * struct ipa3_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @link: linked to the Rx packets on that pipe
+ * @data: skb and DMA address of this Rx packet
+ * @len: how many bytes are copied into skb's flat buffer
+ * @work: work struct for current Rx packet
+ * @sys: corresponding IPA sys context
+ */
+struct ipa3_rx_pkt_wrapper {
+ struct list_head link;
+ struct ipa_rx_data data;
+ u32 len;
+ struct work_struct work;
+ struct ipa3_sys_context *sys;
+};
+
+/**
+ * struct ipa3_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ * @nat_base_address: nat table virtual address
+ * @ipv4_rules_addr: base nat table address
+ * @ipv4_expansion_rules_addr: expansion table address
+ * @index_table_addr: index table address
+ * @index_table_expansion_addr: index expansion table address
+ * @size_base_tables: base table size
+ * @size_expansion_tables: expansion table size
+ * @public_ip_addr: ip address of nat table
+ */
+struct ipa3_nat_mem {
+ struct class *class;
+ struct device *dev;
+ struct cdev cdev;
+ dev_t dev_num;
+ void *vaddr;
+ dma_addr_t dma_handle;
+ size_t size;
+ bool is_mapped;
+ bool is_sys_mem;
+ bool is_dev_init;
+ bool is_dev;
+ struct mutex lock;
+ void *nat_base_address;
+ char *ipv4_rules_addr;
+ char *ipv4_expansion_rules_addr;
+ char *index_table_addr;
+ char *index_table_expansion_addr;
+ u32 size_base_tables;
+ u32 size_expansion_tables;
+ u32 public_ip_addr;
+ void *tmp_vaddr;
+ dma_addr_t tmp_dma_handle;
+ bool is_tmp_mem;
+};
+
+/**
+ * enum ipa3_hw_mode - IPA hardware mode
+ * @IPA_HW_MODE_NORMAL: Regular IPA hardware
+ * @IPA_HW_MODE_VIRTUAL: IPA hardware supporting virtual memory allocation
+ * @IPA_HW_MODE_PCIE: IPA hardware supporting memory allocation over a PCIe
+ * bridge
+ */
+enum ipa3_hw_mode {
+ IPA_HW_MODE_NORMAL = 0,
+ IPA_HW_MODE_VIRTUAL = 1,
+ IPA_HW_MODE_PCIE = 2
+};
+
+enum ipa3_config_this_ep {
+ IPA_CONFIGURE_THIS_EP,
+ IPA_DO_NOT_CONFIGURE_THIS_EP,
+};
+
+struct ipa3_stats {
+ u32 tx_sw_pkts;
+ u32 tx_hw_pkts;
+ u32 rx_pkts;
+ u32 rx_excp_pkts[IPAHAL_PKT_STATUS_EXCEPTION_MAX];
+ u32 rx_repl_repost;
+ u32 tx_pkts_compl;
+ u32 rx_q_len;
+ u32 msg_w[IPA_EVENT_MAX_NUM];
+ u32 msg_r[IPA_EVENT_MAX_NUM];
+ u32 stat_compl;
+ u32 aggr_close;
+ u32 wan_aggr_close;
+ u32 wan_rx_empty;
+ u32 wan_repl_rx_empty;
+ u32 lan_rx_empty;
+ u32 lan_repl_rx_empty;
+ u32 flow_enable;
+ u32 flow_disable;
+ u32 tx_non_linear;
+};
+
+struct ipa3_active_clients {
+ struct mutex mutex;
+ spinlock_t spinlock;
+ bool mutex_locked;
+ int cnt;
+};
+
+struct ipa3_wakelock_ref_cnt {
+ spinlock_t spinlock;
+ int cnt;
+};
+
+struct ipa3_tag_completion {
+ struct completion comp;
+ atomic_t cnt;
+};
+
+struct ipa3_controller;
+
+/**
+ * struct ipa3_uc_hdlrs - IPA uC callback functions
+ * @ipa_uc_loaded_hdlr: Function handler when uC is loaded
+ * @ipa_uc_event_hdlr: Event handler function
+ * @ipa3_uc_response_hdlr: Response handler function
+ * @ipa_uc_event_log_info_hdlr: Log event handler function
+ */
+struct ipa3_uc_hdlrs {
+ void (*ipa_uc_loaded_hdlr)(void);
+
+ void (*ipa_uc_event_hdlr)
+ (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio);
+
+ int (*ipa3_uc_response_hdlr)
+ (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio,
+ u32 *uc_status);
+
+ void (*ipa_uc_event_log_info_hdlr)
+ (struct IpaHwEventLogInfoData_t *uc_event_top_mmio);
+};
+
+/**
+ * enum ipa3_hw_flags - flags which defines the behavior of HW
+ *
+ * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert
+ * failure.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported
+ * in the event ring only. No event to CPU.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event
+ * IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST
+ * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by
+ * QMB (avoid memcpy)
+ * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in
+ * IN Channel
+ * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is
+ * entering a mode where it expects a doorbell to be rung for OUT Channel
+ * @IPA_HW_FLAG_NO_START_OOB_TIMER: If set, do not start the OOB timer
+ */
+enum ipa3_hw_flags {
+ IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE = 0x01,
+ IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR = 0x02,
+ IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP = 0x04,
+ IPA_HW_FLAG_WORK_OVER_DDR = 0x08,
+ IPA_HW_FLAG_NO_REPORT_OOB = 0x10,
+ IPA_HW_FLAG_NO_REPORT_DB_MODE = 0x20,
+ IPA_HW_FLAG_NO_START_OOB_TIMER = 0x40
+};
+
+/**
+ * struct ipa3_uc_ctx - IPA uC context
+ * @uc_inited: Indicates if uC interface has been initialized
+ * @uc_loaded: Indicates if uC has loaded
+ * @uc_failed: Indicates if uC has failed / returned an error
+ * @uc_lock: uC interface lock to allow only one uC interaction at a time
+ * @uc_spinlock: same as uc_lock but for irq contexts
+ * @uc_completion: Completion mechanism to wait for uC commands
+ * @uc_sram_mmio: Pointer to uC mapped memory
+ * @pending_cmd: The last command sent waiting to be ACKed
+ * @uc_status: The last status provided by the uC
+ * @uc_error_type: error type from uC error event
+ * @uc_error_timestamp: tag timer sampled after uC crashed
+ */
+struct ipa3_uc_ctx {
+ bool uc_inited;
+ bool uc_loaded;
+ bool uc_failed;
+ struct mutex uc_lock;
+ spinlock_t uc_spinlock;
+ struct completion uc_completion;
+ struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio;
+ struct IpaHwEventLogInfoData_t *uc_event_top_mmio;
+ u32 uc_event_top_ofst;
+ u32 pending_cmd;
+ u32 uc_status;
+ u32 uc_error_type;
+ u32 uc_error_timestamp;
+ phys_addr_t rdy_ring_base_pa;
+ phys_addr_t rdy_ring_rp_pa;
+ u32 rdy_ring_size;
+ phys_addr_t rdy_comp_ring_base_pa;
+ phys_addr_t rdy_comp_ring_wp_pa;
+ u32 rdy_comp_ring_size;
+ u32 *rdy_ring_rp_va;
+ u32 *rdy_comp_ring_wp_va;
+};
+
+/**
+ * struct ipa3_uc_wdi_ctx - WDI specific fields of the uC interface context
+ * @wdi_uc_stats_ofst: offset of the WDI stats in uC shared memory
+ * @wdi_uc_stats_mmio: pointer to the mapped WDI stats
+ * @priv: callback cookie for @uc_ready_cb
+ * @uc_ready_cb: client callback invoked once the uC is ready
+ */
+struct ipa3_uc_wdi_ctx {
+ /* WDI specific fields */
+ u32 wdi_uc_stats_ofst;
+ struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * struct ipa3_transport_pm - transport power management related members
+ * @lock: lock for ensuring atomic operations
+ * @res_granted: true if SPS requested IPA resource and IPA granted it
+ * @res_rel_in_prog: true if releasing IPA resource is in progress
+ */
+struct ipa3_transport_pm {
+ spinlock_t lock;
+ bool res_granted;
+ bool res_rel_in_prog;
+ atomic_t dec_clients;
+ atomic_t eot_activity;
+};
+
+/**
+ * struct ipa3cm_client_info - the client-info indicated from IPACM
+ * @ipacm_client_enum: the enum to indicate tether-client
+ * @ipacm_client_uplink: the bool to indicate pipe for uplink
+ */
+struct ipa3cm_client_info {
+ enum ipacm_client_enum client_enum;
+ bool uplink;
+};
+
+struct ipa3_smp2p_info {
+ u32 out_base_id;
+ u32 in_base_id;
+ bool res_sent;
+};
+
+/**
+ * struct ipa3_ready_cb_info - A list of all the registrations
+ * for an indication of IPA driver readiness
+ *
+ * @link: linked list link
+ * @ready_cb: callback
+ * @user_data: User data
+ *
+ */
+struct ipa3_ready_cb_info {
+ struct list_head link;
+ ipa_ready_cb ready_cb;
+ void *user_data;
+};
+
+/**
+ * struct ipa3_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @skip_ep_cfg_shadow: state to update filter table correctly across
+ * power-save
+ * @ep_flt_bitmap: End-points supporting filtering bitmap
+ * @ep_flt_num: End-points supporting filtering number
+ * @resume_on_connect: resume ep on ipa3_connect
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @hdr_tbl: IPA header table
+ * @hdr_proc_ctx_tbl: IPA processing context table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @hdr_proc_ctx_cache: processing context cache
+ * @hdr_proc_ctx_offset_cache: processing context offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa3_sys_context
+ * @smem_sz: shared memory size available for SW use starting
+ * from non-restricted bytes
+ * @smem_restricted_bytes: the bytes that SW should not use in the shared mem
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system
+ * @hdr_mem: header memory
+ * @hdr_proc_ctx_mem: processing context memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @power_mgmt_wq: workqueue for power management
+ * @transport_power_mgmt_wq: workqueue transport related power management
+ * @tag_process_before_gating: indicates whether to start tag process before
+ * gating IPA clocks
+ * @transport_pm: transport power management related information
+ * @disconnect_lock: protects LAN_CONS packet receive notification CB
+ * @pipe_mem_pool: pipe memory pool
+ * @dma_pool: special purpose DMA pool
+ * @ipa3_active_clients: structure for reference counting connected IPA clients
+ * @ipa_hw_type: type of IPA HW (e.g. IPA 1.0, IPA 1.1, etc.)
+ * @ipa3_hw_mode: mode of IPA HW (e.g. Normal, Virtual or over PCIe)
+ * @use_ipa_teth_bridge: use tethering bridge driver
+ * @ipa_bam_remote_mode: ipa bam is in remote mode
+ * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
+ * @logbuf: ipc log buffer for high priority messages
+ * @logbuf_low: ipc log buffer for low priority messages
+ * @ipa_wdi2: using wdi-2.0
+ * @use_64_bit_dma_mask: using 64bits dma mask
+ * @ipa_bus_hdl: msm driver handle for the data path bus
+ * @ctrl: holds the core specific operations based on
+ * core version (vtable like)
+ * @enable_clock_scaling: flag indicating if clock scaling is enabled
+ * @curr_ipa_clk_rate: ipa3_clk current rate
+ * @wcstats: wlan common buffer stats
+ * @uc_ctx: uC interface context
+ * @uc_wdi_ctx: WDI specific fields for uC interface
+ * @ipa_num_pipes: The number of pipes used by IPA HW
+ * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided
+ * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA
+ * @apply_rg10_wa: Indicates whether to use register group 10 workaround
+ * @gsi_ch20_wa: Indicates whether to apply GSI physical channel 20 workaround
+ * @w_lock: Indicates the wakeup source.
+ * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired
+ * @ipa_initialization_complete: Indicates that IPA is fully initialized
+ * @ipa_ready_cb_list: A list of all the clients who require a CB when IPA
+ * driver is ready/initialized.
+ * @init_completion_obj: Completion object to be used in case IPA driver hasn't
+ * finished initializing. Example of use - IOCTLs to /dev/ipa
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa3_context {
+ struct class *class;
+ dev_t dev_num;
+ struct device *dev;
+ struct cdev cdev;
+ unsigned long bam_handle;
+ struct ipa3_ep_context ep[IPA3_MAX_NUM_PIPES];
+ bool skip_ep_cfg_shadow[IPA3_MAX_NUM_PIPES];
+ u32 ep_flt_bitmap;
+ u32 ep_flt_num;
+ bool resume_on_connect[IPA_CLIENT_MAX];
+ struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX];
+ void __iomem *mmio;
+ u32 ipa_wrapper_base;
+ u32 ipa_wrapper_size;
+ struct ipa3_hdr_tbl hdr_tbl;
+ struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
+ struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+ struct ipa3_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+ struct kmem_cache *flt_rule_cache;
+ struct kmem_cache *rt_rule_cache;
+ struct kmem_cache *hdr_cache;
+ struct kmem_cache *hdr_offset_cache;
+ struct kmem_cache *hdr_proc_ctx_cache;
+ struct kmem_cache *hdr_proc_ctx_offset_cache;
+ struct kmem_cache *rt_tbl_cache;
+ struct kmem_cache *tx_pkt_wrapper_cache;
+ struct kmem_cache *rx_pkt_wrapper_cache;
+ unsigned long rt_idx_bitmap[IPA_IP_MAX];
+ struct mutex lock;
+ u16 smem_sz;
+ u16 smem_restricted_bytes;
+ u16 smem_reqd_sz;
+ struct ipa3_nat_mem nat_mem;
+ u32 excp_hdr_hdl;
+ u32 dflt_v4_rt_rule_hdl;
+ u32 dflt_v6_rt_rule_hdl;
+ uint aggregation_type;
+ uint aggregation_byte_limit;
+ uint aggregation_time_limit;
+ bool hdr_tbl_lcl;
+ bool hdr_proc_ctx_tbl_lcl;
+ struct ipa_mem_buffer hdr_mem;
+ struct ipa_mem_buffer hdr_proc_ctx_mem;
+ bool ip4_rt_tbl_hash_lcl;
+ bool ip4_rt_tbl_nhash_lcl;
+ bool ip6_rt_tbl_hash_lcl;
+ bool ip6_rt_tbl_nhash_lcl;
+ bool ip4_flt_tbl_hash_lcl;
+ bool ip4_flt_tbl_nhash_lcl;
+ bool ip6_flt_tbl_hash_lcl;
+ bool ip6_flt_tbl_nhash_lcl;
+ struct gen_pool *pipe_mem_pool;
+ struct dma_pool *dma_pool;
+ struct ipa3_active_clients ipa3_active_clients;
+ struct ipa3_active_clients_log_ctx ipa3_active_clients_logging;
+ struct workqueue_struct *power_mgmt_wq;
+ struct workqueue_struct *transport_power_mgmt_wq;
+ bool tag_process_before_gating;
+ struct ipa3_transport_pm transport_pm;
+ u32 clnt_hdl_cmd;
+ u32 clnt_hdl_data_in;
+ u32 clnt_hdl_data_out;
+ spinlock_t disconnect_lock;
+ u8 a5_pipe_index;
+ struct list_head intf_list;
+ struct list_head msg_list;
+ struct list_head pull_msg_list;
+ struct mutex msg_lock;
+ wait_queue_head_t msg_waitq;
+ enum ipa_hw_type ipa_hw_type;
+ enum ipa3_hw_mode ipa3_hw_mode;
+ bool use_ipa_teth_bridge;
+ bool ipa_bam_remote_mode;
+ bool modem_cfg_emb_pipe_flt;
+ bool ipa_wdi2;
+ bool use_64_bit_dma_mask;
+ /* featurize if memory footprint becomes a concern */
+ struct ipa3_stats stats;
+ void *smem_pipe_mem;
+ void *logbuf;
+ void *logbuf_low;
+ u32 ipa_bus_hdl;
+ struct ipa3_controller *ctrl;
+ struct idr ipa_idr;
+ struct device *pdev;
+ struct device *uc_pdev;
+ spinlock_t idr_lock;
+ u32 enable_clock_scaling;
+ u32 curr_ipa_clk_rate;
+ bool q6_proxy_clk_vote_valid;
+ u32 ipa_num_pipes;
+
+ struct ipa3_wlan_comm_memb wc_memb;
+
+ struct ipa3_uc_ctx uc_ctx;
+
+ struct ipa3_uc_wdi_ctx uc_wdi_ctx;
+ struct ipa3_uc_ntn_ctx uc_ntn_ctx;
+ u32 wan_rx_ring_size;
+ u32 lan_rx_ring_size;
+ bool skip_uc_pipe_reset;
+ enum ipa_transport_type transport_prototype;
+ unsigned long gsi_dev_hdl;
+ u32 ee;
+ bool apply_rg10_wa;
+ bool gsi_ch20_wa;
+ bool smmu_present;
+ bool smmu_s1_bypass;
+ unsigned long peer_bam_iova;
+ phys_addr_t peer_bam_pa;
+ u32 peer_bam_map_size;
+ unsigned long peer_bam_dev;
+ u32 peer_bam_map_cnt;
+ u32 wdi_map_cnt;
+ struct wakeup_source w_lock;
+ struct ipa3_wakelock_ref_cnt wakelock_ref_cnt;
+ /* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */
+ bool ipa_client_apps_wan_cons_agg_gro;
+ /* M-release support to know client pipes */
+ struct ipa3cm_client_info ipacm_client[IPA3_MAX_NUM_PIPES];
+ bool tethered_flow_control;
+ bool ipa_initialization_complete;
+ struct list_head ipa_ready_cb_list;
+ struct completion init_completion_obj;
+ struct ipa3_smp2p_info smp2p_info;
+};
+
+/**
+ * enum ipa3_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa3_pipe_mem_type {
+ IPA_SPS_PIPE_MEM = 0,
+ IPA_PRIVATE_MEM = 1,
+ IPA_SYSTEM_MEM = 2,
+};
+
+struct ipa3_plat_drv_res {
+ bool use_ipa_teth_bridge;
+ u32 ipa_mem_base;
+ u32 ipa_mem_size;
+ u32 transport_mem_base;
+ u32 transport_mem_size;
+ u32 ipa_irq;
+ u32 transport_irq;
+ u32 ipa_pipe_mem_start_ofst;
+ u32 ipa_pipe_mem_size;
+ enum ipa_hw_type ipa_hw_type;
+ enum ipa3_hw_mode ipa3_hw_mode;
+ u32 ee;
+ bool ipa_bam_remote_mode;
+ bool modem_cfg_emb_pipe_flt;
+ bool ipa_wdi2;
+ bool use_64_bit_dma_mask;
+ u32 wan_rx_ring_size;
+ u32 lan_rx_ring_size;
+ bool skip_uc_pipe_reset;
+ enum ipa_transport_type transport_prototype;
+ bool apply_rg10_wa;
+ bool gsi_ch20_wa;
+ bool tethered_flow_control;
+};
+
+/**
+ * struct ipa3_mem_partition - represents IPA RAM Map as read from DTS
+ * Order and type of members should not be changed without a suitable change
+ * to DTS file or the code that reads it.
+ *
+ * IPA v3.0 SRAM memory layout:
+ * +-------------------------+
+ * | UC INFO |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V4 FLT HDR HASHABLE |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V4 FLT HDR NON-HASHABLE |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V6 FLT HDR HASHABLE |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V6 FLT HDR NON-HASHABLE |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V4 RT HDR HASHABLE |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V4 RT HDR NON-HASHABLE |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V6 RT HDR HASHABLE |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V6 RT HDR NON-HASHABLE |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | MODEM HDR |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | MODEM PROC CTX |
+ * +-------------------------+
+ * | APPS PROC CTX |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | MODEM MEM |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ */
+struct ipa3_mem_partition {
+ u32 ofst_start;
+ u32 nat_ofst;
+ u32 nat_size;
+ u32 v4_flt_hash_ofst;
+ u32 v4_flt_hash_size;
+ u32 v4_flt_hash_size_ddr;
+ u32 v4_flt_nhash_ofst;
+ u32 v4_flt_nhash_size;
+ u32 v4_flt_nhash_size_ddr;
+ u32 v6_flt_hash_ofst;
+ u32 v6_flt_hash_size;
+ u32 v6_flt_hash_size_ddr;
+ u32 v6_flt_nhash_ofst;
+ u32 v6_flt_nhash_size;
+ u32 v6_flt_nhash_size_ddr;
+ u32 v4_rt_num_index;
+ u32 v4_modem_rt_index_lo;
+ u32 v4_modem_rt_index_hi;
+ u32 v4_apps_rt_index_lo;
+ u32 v4_apps_rt_index_hi;
+ u32 v4_rt_hash_ofst;
+ u32 v4_rt_hash_size;
+ u32 v4_rt_hash_size_ddr;
+ u32 v4_rt_nhash_ofst;
+ u32 v4_rt_nhash_size;
+ u32 v4_rt_nhash_size_ddr;
+ u32 v6_rt_num_index;
+ u32 v6_modem_rt_index_lo;
+ u32 v6_modem_rt_index_hi;
+ u32 v6_apps_rt_index_lo;
+ u32 v6_apps_rt_index_hi;
+ u32 v6_rt_hash_ofst;
+ u32 v6_rt_hash_size;
+ u32 v6_rt_hash_size_ddr;
+ u32 v6_rt_nhash_ofst;
+ u32 v6_rt_nhash_size;
+ u32 v6_rt_nhash_size_ddr;
+ u32 modem_hdr_ofst;
+ u32 modem_hdr_size;
+ u32 apps_hdr_ofst;
+ u32 apps_hdr_size;
+ u32 apps_hdr_size_ddr;
+ u32 modem_hdr_proc_ctx_ofst;
+ u32 modem_hdr_proc_ctx_size;
+ u32 apps_hdr_proc_ctx_ofst;
+ u32 apps_hdr_proc_ctx_size;
+ u32 apps_hdr_proc_ctx_size_ddr;
+ u32 modem_comp_decomp_ofst;
+ u32 modem_comp_decomp_size;
+ u32 modem_ofst;
+ u32 modem_size;
+ u32 apps_v4_flt_hash_ofst;
+ u32 apps_v4_flt_hash_size;
+ u32 apps_v4_flt_nhash_ofst;
+ u32 apps_v4_flt_nhash_size;
+ u32 apps_v6_flt_hash_ofst;
+ u32 apps_v6_flt_hash_size;
+ u32 apps_v6_flt_nhash_ofst;
+ u32 apps_v6_flt_nhash_size;
+ u32 uc_info_ofst;
+ u32 uc_info_size;
+ u32 end_ofst;
+ u32 apps_v4_rt_hash_ofst;
+ u32 apps_v4_rt_hash_size;
+ u32 apps_v4_rt_nhash_ofst;
+ u32 apps_v4_rt_nhash_size;
+ u32 apps_v6_rt_hash_ofst;
+ u32 apps_v6_rt_hash_size;
+ u32 apps_v6_rt_nhash_ofst;
+ u32 apps_v6_rt_nhash_size;
+};
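
Since struct ipa3_mem_partition is read from DTS in declaration order (per the comment above), a reader can fill it with a single flat u32 array read. A minimal sketch, assuming a hypothetical property name and helper (the actual binding and reader in this patch may differ):

/*
 * Illustrative only: populate the partition struct from a flat DTS u32
 * array in declaration order. The property name is a placeholder.
 * Requires <linux/of.h>.
 */
static int example_read_mem_partition(struct device_node *node,
				      struct ipa3_mem_partition *mp)
{
	/* Safe only while every member stays a u32 and the DTS order
	 * matches the declaration order, as the comment above warns.
	 */
	return of_property_read_u32_array(node, "qcom,ipa-ram-mmap",
					  (u32 *)mp,
					  sizeof(*mp) / sizeof(u32));
}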
+
+struct ipa3_controller {
+ struct ipa3_mem_partition mem_partition;
+ u32 ipa_clk_rate_turbo;
+ u32 ipa_clk_rate_nominal;
+ u32 ipa_clk_rate_svs;
+ u32 clock_scaling_bw_threshold_turbo;
+ u32 clock_scaling_bw_threshold_nominal;
+ u32 ipa_reg_base_ofst;
+ u32 max_holb_tmr_val;
+ void (*ipa_sram_read_settings)(void);
+ int (*ipa_init_sram)(void);
+ int (*ipa_init_hdr)(void);
+ int (*ipa_init_rt4)(void);
+ int (*ipa_init_rt6)(void);
+ int (*ipa_init_flt4)(void);
+ int (*ipa_init_flt6)(void);
+ int (*ipa3_read_ep_reg)(char *buff, int max_len, int pipe);
+ int (*ipa3_commit_flt)(enum ipa_ip_type ip);
+ int (*ipa3_commit_rt)(enum ipa_ip_type ip);
+ int (*ipa3_commit_hdr)(void);
+ void (*ipa3_enable_clks)(void);
+ void (*ipa3_disable_clks)(void);
+ struct msm_bus_scale_pdata *msm_bus_data_ptr;
+};
+
+extern struct ipa3_context *ipa3_ctx;
+
+/* public APIs */
+/*
+ * Connect / Disconnect
+ */
+int ipa3_connect(const struct ipa_connect_params *in,
+ struct ipa_sps_params *sps,
+ u32 *clnt_hdl);
+int ipa3_disconnect(u32 clnt_hdl);
+
+/* Generic GSI channels functions */
+int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
+ struct ipa_req_chan_out_params *out_params);
+
+int ipa3_release_gsi_channel(u32 clnt_hdl);
+
+int ipa3_start_gsi_channel(u32 clnt_hdl);
+
+int ipa3_stop_gsi_channel(u32 clnt_hdl);
+
+int ipa3_reset_gsi_channel(u32 clnt_hdl);
+
+int ipa3_reset_gsi_event_ring(u32 clnt_hdl);
+
+/* Specific xDCI channels functions */
+int ipa3_set_usb_max_packet_size(
+ enum ipa_usb_max_usb_packet_size usb_max_packet_size);
+
+int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid);
+
+int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
+
+int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ bool should_force_clear, u32 qmi_req_id, bool is_dpl);
+
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl);
+
+/*
+ * Resume / Suspend
+ */
+int ipa3_reset_endpoint(u32 clnt_hdl);
+
+/*
+ * Remove ep delay
+ */
+int ipa3_clear_endpoint_delay(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
+
+int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
+ const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+/*
+ * Header removal / addition
+ */
+int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa3_commit_hdr(void);
+
+int ipa3_reset_hdr(void);
+
+int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa3_put_hdr(u32 hdr_hdl);
+
+int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Header Processing Context
+ */
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+/*
+ * Routing
+ */
+int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
+
+int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa3_commit_rt(enum ipa_ip_type ip);
+
+int ipa3_reset_rt(enum ipa_ip_type ip);
+
+int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa3_put_rt_tbl(u32 rt_tbl_hdl);
+
+int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
+
+int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
+
+/*
+ * Filtering
+ */
+int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules);
+
+int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
+
+int ipa3_commit_flt(enum ipa_ip_type ip);
+
+int ipa3_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Messaging
+ */
+int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback);
+int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
+int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta);
+
+/*
+ * Interface
+ */
+int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx);
+int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext);
+int ipa3_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+int ipa3_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa3_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa3_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * Data path
+ */
+int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata);
+
+/*
+ * To transfer multiple data packets.
+ * When passing the data descriptor list, the anchor node
+ * must be of type struct ipa_tx_data_desc, not list_head.
+ */
+int ipa3_tx_dp_mul(enum ipa_client_type dst,
+ struct ipa_tx_data_desc *data_desc);
+
+void ipa3_free_skb(struct ipa_rx_data *);
+
+/*
+ * System pipes
+ */
+int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa3_teardown_sys_pipe(u32 clnt_hdl);
+
+int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
+ unsigned long *ipa_bam_hdl,
+ u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
+
+int ipa3_sys_teardown(u32 clnt_hdl);
+
+int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+ unsigned long gsi_ev_hdl);
+
+int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out);
+int ipa3_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa3_enable_wdi_pipe(u32 clnt_hdl);
+int ipa3_disable_wdi_pipe(u32 clnt_hdl);
+int ipa3_resume_wdi_pipe(u32 clnt_hdl);
+int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+/*
+ * To retrieve the doorbell physical address of
+ * the WLAN pipes
+ */
+int ipa3_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * Check uC readiness; if the uC is not yet ready,
+ * register a callback to be invoked once it
+ * becomes ready
+ */
+int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa3_uc_dereg_rdyCB(void);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int ipa3_teth_bridge_init(struct teth_bridge_init_params *params);
+
+int ipa3_teth_bridge_disconnect(enum ipa_client_type client);
+
+int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
+/*
+ * Tethering client info
+ */
+void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink);
+
+enum ipacm_client_enum ipa3_get_client(int pipe_idx);
+
+bool ipa3_get_client_uplink(int pipe_idx);
+
+/*
+ * IPADMA
+ */
+int ipa3_dma_init(void);
+
+int ipa3_dma_enable(void);
+
+int ipa3_dma_disable(void);
+
+int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
+ void (*user_cb)(void *user1), void *user_param);
+
+int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+
+void ipa3_dma_destroy(void);
+
+/*
+ * MHI
+ */
+
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params);
+
+int ipa3_connect_mhi_pipe(
+ struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl);
+
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl);
+
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client);
+
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client);
+
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client);
+
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client);
+
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index);
+
+int ipa3_mhi_destroy_channel(enum ipa_client_type client);
+
+/*
+ * mux id
+ */
+int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
+
+/*
+ * interrupts
+ */
+int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data);
+
+int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt);
+
+/*
+ * Miscellaneous
+ */
+void ipa3_bam_reg_dump(void);
+
+int ipa3_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa3_is_ready(void);
+
+void ipa3_proxy_clk_vote(void);
+void ipa3_proxy_clk_unvote(void);
+
+bool ipa3_is_client_handle_valid(u32 clnt_hdl);
+
+enum ipa_client_type ipa3_get_client_mapping(int pipe_idx);
+
+void ipa_init_ep_flt_bitmap(void);
+
+bool ipa_is_ep_support_flt(int pipe_idx);
+
+enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx);
+
+bool ipa3_get_modem_cfg_emb_pipe_flt(void);
+
+u8 ipa3_get_qmb_master_sel(enum ipa_client_type client);
+
+/* internal functions */
+
+int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+ struct ipa_api_controller *api_ctrl);
+
+bool ipa_is_modem_pipe(int pipe_idx);
+
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+ bool in_atomic);
+int ipa3_send(struct ipa3_sys_context *sys,
+ u32 num_desc,
+ struct ipa3_desc *desc,
+ bool in_atomic);
+int ipa_get_ep_group(enum ipa_client_type client);
+
+int ipa3_generate_hw_rule(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ u8 **buf,
+ u16 *en_rule);
+int ipa3_init_hw(void);
+struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+int ipa3_set_single_ndp_per_mbim(bool);
+void ipa3_debugfs_init(void);
+void ipa3_debugfs_remove(void);
+
+void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+ ipa3_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
+int ipa3_init_mem_partition(struct device_node *dev_node);
+int ipa3_controller_static_bind(struct ipa3_controller *controller,
+ enum ipa_hw_type ipa_hw_type);
+int ipa3_cfg_route(struct ipahal_reg_route *route);
+int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr);
+int ipa3_cfg_filter(u32 disable);
+int ipa3_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa3_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa3_pipe_mem_free(u32 ofst, u32 size);
+int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa3_context *ipa3_get_ctx(void);
+void ipa3_enable_clks(void);
+void ipa3_disable_clks(void);
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+ *id);
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+ bool int_ctx);
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+ bool int_ctx);
+int ipa3_active_clients_log_print_buffer(char *buf, int size);
+int ipa3_active_clients_log_print_table(char *buf, int size);
+void ipa3_active_clients_log_clear(void);
+int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
+int __ipa3_del_rt_rule(u32 rule_hdl);
+int __ipa3_del_hdr(u32 hdr_hdl);
+int __ipa3_release_hdr(u32 hdr_hdl);
+int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl);
+int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe);
+void _ipa_enable_clks_v3_0(void);
+void _ipa_disable_clks_v3_0(void);
+struct device *ipa3_get_dma_dev(void);
+void ipa3_suspend_active_aggr_wa(u32 clnt_hdl);
+void ipa3_suspend_handler(enum ipa_irq_type interrupt,
+ void *private_data,
+ void *interrupt_data);
+
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+
+ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos);
+int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
+int ipa3_query_intf(struct ipa_ioc_query_intf *lookup);
+int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx);
+int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx);
+int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext);
+
+void wwan_cleanup(void);
+
+int ipa3_teth_bridge_driver_init(void);
+void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+
+int _ipa_init_sram_v3_0(void);
+int _ipa_init_hdr_v3_0(void);
+int _ipa_init_rt4_v3(void);
+int _ipa_init_rt6_v3(void);
+int _ipa_init_flt4_v3(void);
+int _ipa_init_flt6_v3(void);
+
+int __ipa_commit_flt_v3(enum ipa_ip_type ip);
+int __ipa_commit_rt_v3(enum ipa_ip_type ip);
+
+int __ipa_commit_hdr_v3_0(void);
+void ipa3_skb_recycle(struct sk_buff *skb);
+void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx);
+void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx);
+
+int ipa3_enable_data_path(u32 clnt_hdl);
+int ipa3_disable_data_path(u32 clnt_hdl);
+int ipa3_alloc_rule_id(struct idr *rule_ids);
+int ipa3_id_alloc(void *ptr);
+void *ipa3_id_find(u32 id);
+void ipa3_id_remove(u32 id);
+
+int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+ u32 bandwidth_mbps);
+
+int ipa3_cfg_ep_status(u32 clnt_hdl,
+ const struct ipahal_reg_ep_cfg_status *ipa_ep_cfg);
+
+int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name name);
+int ipa3_suspend_resource_sync(enum ipa_rm_resource_name name);
+int ipa3_resume_resource(enum ipa_rm_resource_name name);
+bool ipa3_should_pipe_be_suspended(enum ipa_client_type client);
+int ipa3_tag_aggr_force_close(int pipe_num);
+
+void ipa3_active_clients_lock(void);
+int ipa3_active_clients_trylock(unsigned long *flags);
+void ipa3_active_clients_unlock(void);
+void ipa3_active_clients_trylock_unlock(unsigned long *flags);
+int ipa3_wdi_init(void);
+int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
+int ipa3_tag_process(struct ipa3_desc *desc, int num_descs,
+ unsigned long timeout);
+
+void ipa3_q6_pre_shutdown_cleanup(void);
+void ipa3_q6_post_shutdown_cleanup(void);
+int ipa3_init_q6_smem(void);
+
+int ipa3_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+ enum ipa_client_type ipa_client);
+
+int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info);
+
+int ipa3_uc_interface_init(void);
+int ipa3_uc_reset_pipe(enum ipa_client_type ipa_client);
+int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client);
+int ipa3_uc_state_check(void);
+int ipa3_uc_loaded_check(void);
+void ipa3_uc_load_notify(void);
+int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+ bool polling_mode, unsigned long timeout_jiffies);
+void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
+ struct ipa3_uc_hdlrs *hdlrs);
+int ipa3_create_nat_device(void);
+int ipa3_uc_notify_clk_state(bool enabled);
+void ipa3_dma_async_memcpy_notify_cb(void *priv,
+ enum ipa_dp_evt_type evt, unsigned long data);
+
+int ipa3_uc_update_hw_flags(u32 flags);
+
+int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+void ipa3_uc_mhi_cleanup(void);
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+ u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+ u32 first_evt_idx);
+int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+ int contexArrayIndex, int channelDirection);
+int ipa3_uc_mhi_reset_channel(int channelHandle);
+int ipa3_uc_mhi_suspend_channel(int channelHandle);
+int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected);
+int ipa3_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa3_uc_mhi_print_stats(char *dbg_buff, int size);
+int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+void ipa3_tag_destroy_imm(void *user1, int user2);
+struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx);
+void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
+
+u32 ipa3_get_num_pipes(void);
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void);
+struct iommu_domain *ipa3_get_smmu_domain(void);
+struct iommu_domain *ipa3_get_uc_smmu_domain(void);
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void);
+int ipa3_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+int ipa3_ap_suspend(struct device *dev);
+int ipa3_ap_resume(struct device *dev);
+int ipa3_init_interrupts(void);
+int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
+void ipa3_set_resorce_groups_min_max_limits(void);
+void ipa3_suspend_apps_pipes(bool suspend);
+void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
+ uint32_t qmap_id);
+int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
+ enum ipa_ip_type ip_type,
+ bool hashable,
+ struct ipahal_flt_rule_entry entry[],
+ int *num_entry);
+int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
+ enum ipa_ip_type ip_type,
+ bool hashable,
+ struct ipahal_rt_rule_entry entry[],
+ int *num_entry);
+int ipa3_restore_suspend_handler(void);
+int ipa3_inject_dma_task_for_gsi(void);
+int ipa3_uc_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr);
+void ipa3_inc_acquire_wakelock(void);
+void ipa3_dec_release_wakelock(void);
+int ipa3_load_fws(const struct firmware *firmware);
+int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
+int ipa_gsi_ch20_wa(void);
+int ipa3_rx_poll(u32 clnt_hdl, int budget);
+void ipa3_recycle_wan_skb(struct sk_buff *skb);
+int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map);
+int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr,
+ u32 size, bool map);
+int ipa3_ntn_init(void);
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
+struct dentry *ipa_debugfs_get_root(void);
+bool ipa3_is_msm_device(void);
+#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
new file mode 100644
index 0000000..75711c0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -0,0 +1,567 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include "ipa_i.h"
+
+#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq"
+#define DIS_SUSPEND_INTERRUPT_TIMEOUT 5
+#define IPA_IRQ_NUM_MAX 32
+
+struct ipa3_interrupt_info {
+ ipa_irq_handler_t handler;
+ enum ipa_irq_type interrupt;
+ void *private_data;
+ bool deferred_flag;
+};
+
+struct ipa3_interrupt_work_wrap {
+ struct work_struct interrupt_work;
+ ipa_irq_handler_t handler;
+ enum ipa_irq_type interrupt;
+ void *private_data;
+ void *interrupt_data;
+};
+
+static struct ipa3_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX];
+static struct workqueue_struct *ipa_interrupt_wq;
+static u32 ipa_ee;
+
+static void ipa3_tx_suspend_interrupt_wa(void);
+static void ipa3_enable_tx_suspend_wa(struct work_struct *work);
+static DECLARE_DELAYED_WORK(dwork_en_suspend_int,
+ ipa3_enable_tx_suspend_wa);
+static spinlock_t suspend_wa_lock;
+static void ipa3_process_interrupts(bool isr_context);
+
+static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
+ [IPA_UC_TX_CMD_Q_NOT_FULL_IRQ] = -1,
+ [IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ] = -1,
+ [IPA_BAD_SNOC_ACCESS_IRQ] = 0,
+ [IPA_EOT_COAL_IRQ] = -1,
+ [IPA_UC_IRQ_0] = 2,
+ [IPA_UC_IRQ_1] = 3,
+ [IPA_UC_IRQ_2] = 4,
+ [IPA_UC_IRQ_3] = 5,
+ [IPA_UC_IN_Q_NOT_EMPTY_IRQ] = 6,
+ [IPA_UC_RX_CMD_Q_NOT_FULL_IRQ] = 7,
+ [IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ] = 8,
+ [IPA_RX_ERR_IRQ] = 9,
+ [IPA_DEAGGR_ERR_IRQ] = 10,
+ [IPA_TX_ERR_IRQ] = 11,
+ [IPA_STEP_MODE_IRQ] = 12,
+ [IPA_PROC_ERR_IRQ] = 13,
+ [IPA_TX_SUSPEND_IRQ] = 14,
+ [IPA_TX_HOLB_DROP_IRQ] = 15,
+ [IPA_BAM_GSI_IDLE_IRQ] = 16,
+};
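
In this table, entries set to -1 mark logical interrupts that this HW does not expose. A minimal lookup sketch mirroring the validation that the registration code below performs (helper name is illustrative, not part of this patch):

/*
 * Illustrative helper: translate a logical interrupt type to its HW bit
 * position, treating -1 entries as unsupported.
 */
static int example_irq_to_hw_bit(enum ipa_irq_type interrupt)
{
	int irq_num;

	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ || interrupt >= IPA_IRQ_MAX)
		return -EINVAL;

	irq_num = ipa3_irq_mapping[interrupt];
	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX)
		return -EINVAL;	/* not wired on this HW variant */

	return irq_num;		/* bit position in IRQ_EN/IRQ_STTS */
}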
+
+static void ipa3_interrupt_defer(struct work_struct *work);
+static DECLARE_WORK(ipa3_interrupt_defer_work, ipa3_interrupt_defer);
+
+static void ipa3_deferred_interrupt_work(struct work_struct *work)
+{
+ struct ipa3_interrupt_work_wrap *work_data =
+ container_of(work,
+ struct ipa3_interrupt_work_wrap,
+ interrupt_work);
+ IPADBG("call handler from workq...\n");
+ work_data->handler(work_data->interrupt, work_data->private_data,
+ work_data->interrupt_data);
+ kfree(work_data->interrupt_data);
+ kfree(work_data);
+}
+
+static bool ipa3_is_valid_ep(u32 ep_suspend_data)
+{
+ u32 bmsk = 1;
+ u32 i = 0;
+
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if ((ep_suspend_data & bmsk) && (ipa3_ctx->ep[i].valid))
+ return true;
+ bmsk = bmsk << 1;
+ }
+ return false;
+}
+
+static int ipa3_handle_interrupt(int irq_num, bool isr_context)
+{
+ struct ipa3_interrupt_info interrupt_info;
+ struct ipa3_interrupt_work_wrap *work_data;
+ u32 suspend_data;
+ void *interrupt_data = NULL;
+ struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL;
+ int res;
+
+ interrupt_info = ipa_interrupt_to_cb[irq_num];
+ if (interrupt_info.handler == NULL) {
+ IPAERR("A callback function wasn't set for interrupt num %d\n",
+ irq_num);
+ return -EINVAL;
+ }
+
+ switch (interrupt_info.interrupt) {
+ case IPA_TX_SUSPEND_IRQ:
+ IPADBG_LOW("processing TX_SUSPEND interrupt work-around\n");
+ ipa3_tx_suspend_interrupt_wa();
+ suspend_data = ipahal_read_reg_n(IPA_IRQ_SUSPEND_INFO_EE_n,
+ ipa_ee);
+ IPADBG_LOW("get interrupt %d\n", suspend_data);
+
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+ /* Clearing L2 interrupts status */
+ ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n,
+ ipa_ee, suspend_data);
+ }
+ if (!ipa3_is_valid_ep(suspend_data))
+ return 0;
+
+ suspend_interrupt_data =
+ kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC);
+ if (!suspend_interrupt_data) {
+ IPAERR("failed allocating suspend_interrupt_data\n");
+ return -ENOMEM;
+ }
+ suspend_interrupt_data->endpoints = suspend_data;
+ interrupt_data = suspend_interrupt_data;
+ break;
+ case IPA_UC_IRQ_0:
+ if (ipa3_ctx->apply_rg10_wa) {
+			/*
+			 * Early detection of a uC crash. When the RG10
+			 * workaround is enabled, a uC crash would otherwise
+			 * go undetected: before the uC event is processed,
+			 * the interrupt is cleared using a uC register
+			 * write, which times out because the uC has
+			 * already crashed.
+			 */
+ if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_ERROR)
+ ipa3_ctx->uc_ctx.uc_failed = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Force defer processing if in ISR context. */
+ if (interrupt_info.deferred_flag || isr_context) {
+ work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
+ GFP_ATOMIC);
+ if (!work_data) {
+ IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
+ res = -ENOMEM;
+ goto fail_alloc_work;
+ }
+ INIT_WORK(&work_data->interrupt_work,
+ ipa3_deferred_interrupt_work);
+ work_data->handler = interrupt_info.handler;
+ work_data->interrupt = interrupt_info.interrupt;
+ work_data->private_data = interrupt_info.private_data;
+ work_data->interrupt_data = interrupt_data;
+ queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+
+ } else {
+ interrupt_info.handler(interrupt_info.interrupt,
+ interrupt_info.private_data,
+ interrupt_data);
+ kfree(interrupt_data);
+ }
+
+ return 0;
+
+fail_alloc_work:
+ kfree(interrupt_data);
+ return res;
+}
+
+static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
+{
+ u32 en;
+ u32 suspend_bmask;
+ int irq_num;
+
+ IPADBG_LOW("Enter\n");
+
+ irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+ BUG_ON(irq_num == -1);
+
+ /* make sure ipa hw is clocked on*/
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+ suspend_bmask = 1 << irq_num;
+ /*enable TX_SUSPEND_IRQ*/
+ en |= suspend_bmask;
+ IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n"
+ , en);
+ ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, en);
+ ipa3_process_interrupts(false);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ IPADBG_LOW("Exit\n");
+}
+
+static void ipa3_tx_suspend_interrupt_wa(void)
+{
+ u32 val;
+ u32 suspend_bmask;
+ int irq_num;
+
+ IPADBG_LOW("Enter\n");
+ irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+ BUG_ON(irq_num == -1);
+
+ /*disable TX_SUSPEND_IRQ*/
+ val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+ suspend_bmask = 1 << irq_num;
+ val &= ~suspend_bmask;
+ IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n",
+ val);
+ ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+
+	IPADBG_LOW("processing suspend interrupt work-around, delayed work\n");
+ queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int,
+ msecs_to_jiffies(DIS_SUSPEND_INTERRUPT_TIMEOUT));
+
+ IPADBG_LOW("Exit\n");
+}
+
+static inline bool is_uc_irq(int irq_num)
+{
+ if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 &&
+ ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3)
+ return true;
+ else
+ return false;
+}
+
+static void ipa3_process_interrupts(bool isr_context)
+{
+ u32 reg;
+ u32 bmsk;
+ u32 i = 0;
+ u32 en;
+ unsigned long flags;
+ bool uc_irq;
+
+ IPADBG_LOW("Enter\n");
+
+ spin_lock_irqsave(&suspend_wa_lock, flags);
+ en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+ reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
+ while (en & reg) {
+ bmsk = 1;
+ for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
+ if (en & reg & bmsk) {
+ uc_irq = is_uc_irq(i);
+
+ /*
+ * Clear uC interrupt before processing to avoid
+ * clearing unhandled interrupts
+ */
+ if (uc_irq)
+ ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n,
+ ipa_ee, bmsk);
+
+				/*
+				 * Handle the interrupt with the spinlock
+				 * released to avoid calling the client in
+				 * atomic context. Mutual exclusion is still
+				 * preserved because the read/clear is done
+				 * with the spinlock held.
+				 */
+ spin_unlock_irqrestore(&suspend_wa_lock, flags);
+ ipa3_handle_interrupt(i, isr_context);
+ spin_lock_irqsave(&suspend_wa_lock, flags);
+
+ /*
+ * Clear non uC interrupt after processing
+ * to avoid clearing interrupt data
+ */
+ if (!uc_irq)
+ ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n,
+ ipa_ee, bmsk);
+ }
+ bmsk = bmsk << 1;
+ }
+		/*
+		 * If the uC has failed, the interrupt cannot be cleared;
+		 * the device will crash as part of handling the uC event.
+		 */
+ if (ipa3_ctx->apply_rg10_wa && ipa3_ctx->uc_ctx.uc_failed)
+ break;
+
+ reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
+		/*
+		 * Due to the suspend interrupt HW bug we must re-read the
+		 * EN register; otherwise the while loop never terminates.
+		 */
+ en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+ }
+
+ spin_unlock_irqrestore(&suspend_wa_lock, flags);
+ IPADBG_LOW("Exit\n");
+}
+
+static void ipa3_interrupt_defer(struct work_struct *work)
+{
+ IPADBG("processing interrupts in wq\n");
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipa3_process_interrupts(false);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPADBG("Done\n");
+}
+
+static irqreturn_t ipa3_isr(int irq, void *ctxt)
+{
+ unsigned long flags;
+
+ IPADBG_LOW("Enter\n");
+ /* defer interrupt handling in case IPA is not clocked on */
+ if (ipa3_active_clients_trylock(&flags) == 0) {
+ IPADBG("defer interrupt processing\n");
+ queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
+ return IRQ_HANDLED;
+ }
+
+ if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
+ IPADBG("defer interrupt processing\n");
+ queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
+ goto bail;
+ }
+
+ ipa3_process_interrupts(true);
+ IPADBG_LOW("Exit\n");
+
+bail:
+ ipa3_active_clients_trylock_unlock(&flags);
+ return IRQ_HANDLED;
+}
+/**
+* ipa3_add_interrupt_handler() - Adds a handler for an interrupt type
+* @interrupt: Interrupt type
+* @handler: The handler to be added
+* @deferred_flag: whether the handler processing should be deferred in
+* a workqueue
+* @private_data: the client's private data
+*
+* Adds a handler for an interrupt type and enables the corresponding bit in
+* the IRQ_EN register; the associated interrupt in the IRQ_STTS register
+* will be enabled
+*/
+int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data)
+{
+ u32 val;
+ u32 bmsk;
+ int irq_num;
+ int client_idx, ep_idx;
+
+ IPADBG("in ipa3_add_interrupt_handler interrupt_enum(%d)\n", interrupt);
+ if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+ interrupt >= IPA_IRQ_MAX) {
+ IPAERR("invalid interrupt number %d\n", interrupt);
+ return -EINVAL;
+ }
+
+ irq_num = ipa3_irq_mapping[interrupt];
+ if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+ IPAERR("interrupt %d not supported\n", interrupt);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ IPADBG("ipa_interrupt_to_cb irq_num(%d)\n", irq_num);
+
+ ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
+ ipa_interrupt_to_cb[irq_num].handler = handler;
+ ipa_interrupt_to_cb[irq_num].private_data = private_data;
+ ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
+
+ val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+ IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val);
+ bmsk = 1 << irq_num;
+ val |= bmsk;
+ ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+ IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val);
+
+ /* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt*/
+ if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
+ (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
+ val = ~0;
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+ if (IPA_CLIENT_IS_Q6_CONS(client_idx) ||
+ IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+ ep_idx = ipa3_get_ep_mapping(client_idx);
+ IPADBG("modem ep_idx(%d) client_idx = %d\n",
+ ep_idx, client_idx);
+ if (ep_idx == -1)
+ IPADBG("Invalid IPA client\n");
+ else
+ val &= ~(1 << ep_idx);
+ }
+
+ ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val);
+ IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val);
+ }
+ return 0;
+}
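
For reference, a client would register a deferred handler along these lines; the handler body and the NULL private data are illustrative, not taken from this patch:

/*
 * Illustrative client registration: with deferred_flag set, the handler
 * runs from the interrupt workqueue rather than from the ISR path.
 */
static void example_suspend_cb(enum ipa_irq_type interrupt,
			       void *private_data, void *interrupt_data)
{
	struct ipa_tx_suspend_irq_data *d = interrupt_data;

	pr_debug("TX_SUSPEND for endpoint bitmap 0x%x\n", d->endpoints);
}

static int example_register_suspend_cb(void)
{
	return ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
					  example_suspend_cb,
					  true,	/* defer to workqueue */
					  NULL);
}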
+
+/**
+* ipa3_remove_interrupt_handler() - Removes the handler for an interrupt type
+* @interrupt: Interrupt type
+*
+* Removes the handler and disables the corresponding bit in the IRQ_EN
+* register
+*/
+int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+ u32 val;
+ u32 bmsk;
+ int irq_num;
+
+ if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+ interrupt >= IPA_IRQ_MAX) {
+ IPAERR("invalid interrupt number %d\n", interrupt);
+ return -EINVAL;
+ }
+
+ irq_num = ipa3_irq_mapping[interrupt];
+ if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+ IPAERR("interrupt %d not supported\n", interrupt);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ kfree(ipa_interrupt_to_cb[irq_num].private_data);
+ ipa_interrupt_to_cb[irq_num].deferred_flag = false;
+ ipa_interrupt_to_cb[irq_num].handler = NULL;
+ ipa_interrupt_to_cb[irq_num].private_data = NULL;
+ ipa_interrupt_to_cb[irq_num].interrupt = -1;
+
+ /* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
+ if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
+ (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
+ ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
+ IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
+ }
+
+ val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+ bmsk = 1 << irq_num;
+ val &= ~bmsk;
+ ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+
+ return 0;
+}
+
+/**
+* ipa3_interrupts_init() - Initialize the IPA interrupts framework
+* @ipa_irq: The interrupt number to allocate
+* @ee: Execution environment
+* @ipa_dev: The basic device structure representing the IPA driver
+*
+* - Initialize the ipa_interrupt_to_cb array
+* - Clear interrupts status
+* - Register the ipa interrupt handler - ipa3_isr
+* - Enable apps processor wakeup by IPA interrupts
+*/
+int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
+{
+ int idx;
+ int res = 0;
+
+ ipa_ee = ee;
+ for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) {
+ ipa_interrupt_to_cb[idx].deferred_flag = false;
+ ipa_interrupt_to_cb[idx].handler = NULL;
+ ipa_interrupt_to_cb[idx].private_data = NULL;
+ ipa_interrupt_to_cb[idx].interrupt = -1;
+ }
+
+ ipa_interrupt_wq = create_singlethread_workqueue(
+ INTERRUPT_WORKQUEUE_NAME);
+ if (!ipa_interrupt_wq) {
+ IPAERR("workqueue creation failed\n");
+ return -ENOMEM;
+ }
+
+	res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
+			IRQF_TRIGGER_RISING, "ipa", ipa_dev);
+	if (res) {
+		IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq);
+		destroy_workqueue(ipa_interrupt_wq);
+		ipa_interrupt_wq = NULL;
+		return -ENODEV;
+	}
+ IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
+
+ res = enable_irq_wake(ipa_irq);
+ if (res)
+ IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
+ ipa_irq, res);
+ else
+ IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
+
+ spin_lock_init(&suspend_wa_lock);
+ return 0;
+}
+
+/**
+* ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ
+* @clnt_hdl: suspended client handle; the IRQ is emulated for this pipe
+*
+* Emulate a suspend IRQ to unsuspend a client that was suspended with an open
+* aggregation frame, working around a HW bug where no IRQ is generated when
+* an endpoint is suspended during an open aggregation.
+*/
+void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
+{
+ struct ipa3_interrupt_info interrupt_info;
+ struct ipa3_interrupt_work_wrap *work_data;
+ struct ipa_tx_suspend_irq_data *suspend_interrupt_data;
+ int irq_num;
+ int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+
+ if (aggr_active_bitmap & (1 << clnt_hdl)) {
+ /* force close aggregation */
+ ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
+
+ /* simulate suspend IRQ */
+ irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+ interrupt_info = ipa_interrupt_to_cb[irq_num];
+ if (interrupt_info.handler == NULL) {
+ IPAERR("no CB function for IPA_TX_SUSPEND_IRQ!\n");
+ return;
+ }
+ suspend_interrupt_data = kzalloc(
+ sizeof(*suspend_interrupt_data),
+ GFP_ATOMIC);
+ if (!suspend_interrupt_data) {
+ IPAERR("failed allocating suspend_interrupt_data\n");
+ return;
+ }
+ suspend_interrupt_data->endpoints = 1 << clnt_hdl;
+
+ work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
+ GFP_ATOMIC);
+ if (!work_data) {
+ IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
+ goto fail_alloc_work;
+ }
+ INIT_WORK(&work_data->interrupt_work,
+ ipa3_deferred_interrupt_work);
+ work_data->handler = interrupt_info.handler;
+ work_data->interrupt = IPA_TX_SUSPEND_IRQ;
+ work_data->private_data = interrupt_info.private_data;
+ work_data->interrupt_data = (void *)suspend_interrupt_data;
+ queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+ return;
+fail_alloc_work:
+ kfree(suspend_interrupt_data);
+ }
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
new file mode 100644
index 0000000..32c5004
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -0,0 +1,615 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include "ipa_i.h"
+
+struct ipa3_intf {
+ char name[IPA_RESOURCE_NAME_MAX];
+ struct list_head link;
+ u32 num_tx_props;
+ u32 num_rx_props;
+ u32 num_ext_props;
+ struct ipa_ioc_tx_intf_prop *tx;
+ struct ipa_ioc_rx_intf_prop *rx;
+ struct ipa_ioc_ext_intf_prop *ext;
+ enum ipa_client_type excp_pipe;
+};
+
+struct ipa3_push_msg {
+ struct ipa_msg_meta meta;
+ ipa_msg_free_fn callback;
+ void *buff;
+ struct list_head link;
+};
+
+struct ipa3_pull_msg {
+ struct ipa_msg_meta meta;
+ ipa_msg_pull_fn callback;
+ struct list_head link;
+};
+
+/**
+ * ipa3_register_intf() - register "logical" interface
+ * @name: [in] interface name
+ * @tx: [in] TX properties of the interface
+ * @rx: [in] RX properties of the interface
+ *
+ * Register an interface and its tx and rx properties; this allows
+ * configuration of rules from user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx)
+{
+ return ipa3_register_intf_ext(name, tx, rx, NULL);
+}
+
+/**
+ * ipa3_register_intf_ext() - register "logical" interface which also has
+ * extended properties
+ * @name: [in] interface name
+ * @tx: [in] TX properties of the interface
+ * @rx: [in] RX properties of the interface
+ * @ext: [in] EXT properties of the interface
+ *
+ * Register an interface and its tx, rx and ext properties; this allows
+ * configuration of rules from user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext)
+{
+ struct ipa3_intf *intf;
+ u32 len;
+
+ if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) {
+ IPAERR("invalid params name=%p tx=%p rx=%p ext=%p\n", name,
+ tx, rx, ext);
+ return -EINVAL;
+ }
+
+ if (tx && tx->num_props > IPA_NUM_PROPS_MAX) {
+ IPAERR("invalid tx num_props=%d max=%d\n", tx->num_props,
+ IPA_NUM_PROPS_MAX);
+ return -EINVAL;
+ }
+
+ if (rx && rx->num_props > IPA_NUM_PROPS_MAX) {
+ IPAERR("invalid rx num_props=%d max=%d\n", rx->num_props,
+ IPA_NUM_PROPS_MAX);
+ return -EINVAL;
+ }
+
+ if (ext && ext->num_props > IPA_NUM_PROPS_MAX) {
+ IPAERR("invalid ext num_props=%d max=%d\n", ext->num_props,
+ IPA_NUM_PROPS_MAX);
+ return -EINVAL;
+ }
+
+ len = sizeof(struct ipa3_intf);
+ intf = kzalloc(len, GFP_KERNEL);
+ if (intf == NULL) {
+ IPAERR("fail to alloc 0x%x bytes\n", len);
+ return -ENOMEM;
+ }
+
+ strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX);
+
+ if (tx) {
+ intf->num_tx_props = tx->num_props;
+ len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop);
+ intf->tx = kzalloc(len, GFP_KERNEL);
+ if (intf->tx == NULL) {
+ IPAERR("fail to alloc 0x%x bytes\n", len);
+ kfree(intf);
+ return -ENOMEM;
+ }
+ memcpy(intf->tx, tx->prop, len);
+ }
+
+ if (rx) {
+ intf->num_rx_props = rx->num_props;
+ len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop);
+ intf->rx = kzalloc(len, GFP_KERNEL);
+ if (intf->rx == NULL) {
+ IPAERR("fail to alloc 0x%x bytes\n", len);
+ kfree(intf->tx);
+ kfree(intf);
+ return -ENOMEM;
+ }
+ memcpy(intf->rx, rx->prop, len);
+ }
+
+ if (ext) {
+ intf->num_ext_props = ext->num_props;
+ len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop);
+ intf->ext = kzalloc(len, GFP_KERNEL);
+ if (intf->ext == NULL) {
+ IPAERR("fail to alloc 0x%x bytes\n", len);
+ kfree(intf->rx);
+ kfree(intf->tx);
+ kfree(intf);
+ return -ENOMEM;
+ }
+ memcpy(intf->ext, ext->prop, len);
+ }
+
+ if (ext && ext->excp_pipe_valid)
+ intf->excp_pipe = ext->excp_pipe;
+ else
+ intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS;
+
+ mutex_lock(&ipa3_ctx->lock);
+ list_add_tail(&intf->link, &ipa3_ctx->intf_list);
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return 0;
+}
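
For reference, a network driver would register its logical interface with a small set of properties along these lines; the interface name, pipe choices and property values are illustrative placeholders:

/*
 * Illustrative registration of a logical interface with one TX and one
 * RX property. All values below are placeholders.
 */
static int example_register_intf(void)
{
	struct ipa_ioc_tx_intf_prop tx_prop = {
		.ip = IPA_IP_v4,
		.dst_pipe = IPA_CLIENT_APPS_WAN_CONS,
	};
	struct ipa_ioc_rx_intf_prop rx_prop = {
		.ip = IPA_IP_v4,
		.src_pipe = IPA_CLIENT_APPS_WAN_PROD,
	};
	struct ipa_tx_intf tx = { .num_props = 1, .prop = &tx_prop };
	struct ipa_rx_intf rx = { .num_props = 1, .prop = &rx_prop };

	return ipa3_register_intf("rmnet_data0", &tx, &rx);
}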
+
+/**
+ * ipa3_deregister_intf() - de-register previously registered logical interface
+ * @name: [in] interface name
+ *
+ * De-register a previously registered interface
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_deregister_intf(const char *name)
+{
+ struct ipa3_intf *entry;
+ struct ipa3_intf *next;
+ int result = -EINVAL;
+
+ if ((name == NULL) ||
+ (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX)) {
+ IPAERR("invalid param name=%s\n", name);
+ return result;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ list_for_each_entry_safe(entry, next, &ipa3_ctx->intf_list, link) {
+ if (!strcmp(entry->name, name)) {
+ list_del(&entry->link);
+ kfree(entry->ext);
+ kfree(entry->rx);
+ kfree(entry->tx);
+ kfree(entry);
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_query_intf() - query logical interface properties
+ * @lookup: [inout] interface name and number of properties
+ *
+ * Obtain the number of tx, rx and ext properties for the named
+ * interface, used as part of querying the tx and rx properties for
+ * configuration of various rules from user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_query_intf(struct ipa_ioc_query_intf *lookup)
+{
+ struct ipa3_intf *entry;
+ int result = -EINVAL;
+
+ if (lookup == NULL) {
+ IPAERR("invalid param lookup=%p\n", lookup);
+ return result;
+ }
+
+ if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
+ IPA_RESOURCE_NAME_MAX) {
+ IPAERR("Interface name too long. (%s)\n", lookup->name);
+ return result;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+ if (!strcmp(entry->name, lookup->name)) {
+ lookup->num_tx_props = entry->num_tx_props;
+ lookup->num_rx_props = entry->num_rx_props;
+ lookup->num_ext_props = entry->num_ext_props;
+ lookup->excp_pipe = entry->excp_pipe;
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_query_intf_tx_props() - query TX props of an interface
+ * @tx: [inout] interface tx attributes
+ *
+ * Obtain the tx properties for the specified interface
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
+{
+ struct ipa3_intf *entry;
+ int result = -EINVAL;
+
+ if (tx == NULL) {
+ IPAERR("invalid param tx=%p\n", tx);
+ return result;
+ }
+
+ if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+ IPAERR("Interface name too long. (%s)\n", tx->name);
+ return result;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+ if (!strcmp(entry->name, tx->name)) {
+ memcpy(tx->tx, entry->tx, entry->num_tx_props *
+ sizeof(struct ipa_ioc_tx_intf_prop));
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_query_intf_rx_props() - query RX props of an interface
+ * @rx: [inout] interface rx attributes
+ *
+ * Obtain the rx properties for the specified interface
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
+{
+ struct ipa3_intf *entry;
+ int result = -EINVAL;
+
+ if (rx == NULL) {
+ IPAERR("invalid param rx=%p\n", rx);
+ return result;
+ }
+
+ if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+ IPAERR("Interface name too long. (%s)\n", rx->name);
+ return result;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+ if (!strcmp(entry->name, rx->name)) {
+ memcpy(rx->rx, entry->rx, entry->num_rx_props *
+ sizeof(struct ipa_ioc_rx_intf_prop));
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_query_intf_ext_props() - query EXT props of an interface
+ * @ext: [inout] interface ext attributes
+ *
+ * Obtain the ext properties for the specified interface
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
+{
+ struct ipa3_intf *entry;
+ int result = -EINVAL;
+
+ if (ext == NULL) {
+ IPAERR("invalid param ext=%p\n", ext);
+ return result;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+ if (!strcmp(entry->name, ext->name)) {
+ memcpy(ext->ext, entry->ext, entry->num_ext_props *
+ sizeof(struct ipa_ioc_ext_intf_prop));
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa3_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * The client supplies the message meta-data and payload, which the IPA driver
+ * buffers until it is read by user-space. After it is read from user-space,
+ * the IPA driver invokes the supplied callback to free the message payload.
+ * The client must not touch/free the message payload after calling this API.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback)
+{
+ struct ipa3_push_msg *msg;
+
+ if (meta == NULL || (buff == NULL && callback != NULL) ||
+ (buff != NULL && callback == NULL)) {
+ IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ meta, buff, callback);
+ return -EINVAL;
+ }
+
+ if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
+ IPAERR("unsupported message type %d\n", meta->msg_type);
+ return -EINVAL;
+ }
+
+ msg = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL);
+ if (msg == NULL) {
+ IPAERR("fail to alloc ipa_msg container\n");
+ return -ENOMEM;
+ }
+
+ msg->meta = *meta;
+ msg->buff = buff;
+ msg->callback = callback;
+
+ mutex_lock(&ipa3_ctx->msg_lock);
+ list_add_tail(&msg->link, &ipa3_ctx->msg_list);
+ mutex_unlock(&ipa3_ctx->msg_lock);
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]);
+
+ wake_up(&ipa3_ctx->msg_waitq);
+
+ return 0;
+}
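
A kernel client hands off a heap-allocated payload together with a free callback, as in this sketch; the event type and payload struct are illustrative choices, not mandated by this patch:

/*
 * Illustrative producer: IPA frees the payload via the callback once
 * user-space has consumed the message.
 */
static void example_msg_free(void *buff, u32 len, u32 type)
{
	kfree(buff);
}

static int example_notify_userspace(void)
{
	struct ipa_msg_meta meta = { 0 };
	struct ipa_wlan_msg *msg;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	meta.msg_type = WLAN_CLIENT_CONNECT;
	meta.msg_len = sizeof(*msg);

	/* the payload must not be touched after this call */
	return ipa3_send_msg(&meta, msg, example_msg_free);
}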
+
+/**
+ * ipa3_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register a message callback with the IPA driver, allowing the driver to
+ * pull a message from the kernel client on demand.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
+{
+ struct ipa3_pull_msg *msg;
+
+ if (meta == NULL || callback == NULL) {
+ IPAERR("invalid param meta=%p callback=%p\n", meta, callback);
+ return -EINVAL;
+ }
+
+ msg = kzalloc(sizeof(struct ipa3_pull_msg), GFP_KERNEL);
+ if (msg == NULL) {
+ IPAERR("fail to alloc ipa_msg container\n");
+ return -ENOMEM;
+ }
+
+ msg->meta = *meta;
+ msg->callback = callback;
+
+ mutex_lock(&ipa3_ctx->msg_lock);
+ list_add_tail(&msg->link, &ipa3_ctx->pull_msg_list);
+ mutex_unlock(&ipa3_ctx->msg_lock);
+
+ return 0;
+}
+
+/**
+ * ipa3_deregister_pull_msg() - De-register pull message type
+ * @meta: [in] message meta-data
+ *
+ * De-register "message" by kernel client from IPA driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+ struct ipa3_pull_msg *entry;
+ struct ipa3_pull_msg *next;
+ int result = -EINVAL;
+
+ if (meta == NULL) {
+ IPAERR("invalid param name=%p\n", meta);
+ return result;
+ }
+
+ mutex_lock(&ipa3_ctx->msg_lock);
+ list_for_each_entry_safe(entry, next, &ipa3_ctx->pull_msg_list, link) {
+ if (entry->meta.msg_len == meta->msg_len &&
+ entry->meta.msg_type == meta->msg_type) {
+ list_del(&entry->link);
+ kfree(entry);
+ result = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipa3_ctx->msg_lock);
+ return result;
+}
+
+/**
+ * ipa3_read() - read message from IPA device
+ * @filp: [in] file pointer
+ * @buf: [out] buffer to read into
+ * @count: [in] size of above buffer
+ * @f_pos: [inout] file position
+ *
+ * User-space should continually read from /dev/ipa; the read will block when
+ * there are no messages to read. Upon return, user-space should read the
+ * ipa_msg_meta from the start of the buffer to know what type of message was
+ * read and its length in the remainder of the buffer. The buffer supplied
+ * must be big enough to hold the message meta-data and the largest defined
+ * message type
+ *
+ * Returns: how many bytes copied to buffer
+ *
+ * Note: Should not be called from atomic context
+ */
+ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ char __user *start;
+ struct ipa3_push_msg *msg = NULL;
+ int ret;
+ DEFINE_WAIT(wait);
+ int locked;
+
+ start = buf;
+
+ while (1) {
+ prepare_to_wait(&ipa3_ctx->msg_waitq,
+ &wait,
+ TASK_INTERRUPTIBLE);
+
+ mutex_lock(&ipa3_ctx->msg_lock);
+ locked = 1;
+ if (!list_empty(&ipa3_ctx->msg_list)) {
+ msg = list_first_entry(&ipa3_ctx->msg_list,
+ struct ipa3_push_msg, link);
+ list_del(&msg->link);
+ }
+
+ IPADBG_LOW("msg=%p\n", msg);
+
+ if (msg) {
+ locked = 0;
+ mutex_unlock(&ipa3_ctx->msg_lock);
+ if (copy_to_user(buf, &msg->meta,
+ sizeof(struct ipa_msg_meta))) {
+ ret = -EFAULT;
+ break;
+ }
+ buf += sizeof(struct ipa_msg_meta);
+ count -= sizeof(struct ipa_msg_meta);
+ if (msg->buff) {
+ if (copy_to_user(buf, msg->buff,
+ msg->meta.msg_len)) {
+ ret = -EFAULT;
+ break;
+ }
+ buf += msg->meta.msg_len;
+ count -= msg->meta.msg_len;
+ msg->callback(msg->buff, msg->meta.msg_len,
+ msg->meta.msg_type);
+ }
+ IPA_STATS_INC_CNT(
+ ipa3_ctx->stats.msg_r[msg->meta.msg_type]);
+ kfree(msg);
+ }
+
+ ret = -EAGAIN;
+ if (filp->f_flags & O_NONBLOCK)
+ break;
+
+ ret = -EINTR;
+ if (signal_pending(current))
+ break;
+
+ if (start != buf)
+ break;
+
+ locked = 0;
+ mutex_unlock(&ipa3_ctx->msg_lock);
+ schedule();
+ }
+
+ finish_wait(&ipa3_ctx->msg_waitq, &wait);
+ if (start != buf && ret != -EFAULT)
+ ret = buf - start;
+
+ if (locked)
+ mutex_unlock(&ipa3_ctx->msg_lock);
+
+ return ret;
+}
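
From user-space, the expected pattern is a blocking read into a buffer large enough for the meta-data plus the largest message, parsing the meta first. A minimal sketch (buffer size is an illustrative placeholder):

/* Illustrative user-space consumer of /dev/ipa. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/msm_ipa.h>

static void example_read_loop(void)
{
	char buf[4096];	/* must fit the meta-data plus the largest message */
	int fd = open("/dev/ipa", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return;

	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		struct ipa_msg_meta *meta = (struct ipa_msg_meta *)buf;

		printf("msg type %u len %u\n", meta->msg_type,
		       meta->msg_len);
		/* the payload, if any, follows the meta-data in buf */
	}
	close(fd);
}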
+
+/**
+ * ipa3_pull_msg() - pull the specified message from client
+ * @meta: [in] message meta-data
+ * @buff: [out] buffer to read into
+ * @count: [in] size of above buffer
+ *
+ * Populate the supplied buffer with the pull message, which is fetched
+ * from the client; the message must have previously been registered with
+ * the IPA driver
+ *
+ * Returns: how many bytes copied to buffer
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
+{
+ struct ipa3_pull_msg *entry;
+ int result = -EINVAL;
+
+ if (meta == NULL || buff == NULL || !count) {
+ IPAERR("invalid param name=%p buff=%p count=%zu\n",
+ meta, buff, count);
+ return result;
+ }
+
+ mutex_lock(&ipa3_ctx->msg_lock);
+ list_for_each_entry(entry, &ipa3_ctx->pull_msg_list, link) {
+ if (entry->meta.msg_len == meta->msg_len &&
+ entry->meta.msg_type == meta->msg_type) {
+ result = entry->callback(buff, count, meta->msg_type);
+ break;
+ }
+ }
+ mutex_unlock(&ipa3_ctx->msg_lock);
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
new file mode 100644
index 0000000..4ef1a96
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -0,0 +1,629 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/msm_gsi.h>
+#include <linux/ipa_mhi.h>
+#include "../ipa_common_i.h"
+#include "ipa_i.h"
+#include "ipa_qmi_service.h"
+
+#define IPA_MHI_DRV_NAME "ipa_mhi"
+
+
+#define IPA_MHI_DBG(fmt, args...) \
+ do { \
+ pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_MHI_DBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+
+#define IPA_MHI_ERR(fmt, args...) \
+ do { \
+ pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+
+#define IPA_MHI_FUNC_ENTRY() \
+ IPA_MHI_DBG_LOW("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+ IPA_MHI_DBG_LOW("EXIT\n")
+
+#define IPA_MHI_MAX_UL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 1
+
+/* bit #40 in the address must be asserted for MHI transfers over PCIe */
+#define IPA_MHI_HOST_ADDR_COND(addr) \
+ ((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
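+
+/*
+ * Worked example (illustrative): with assert_bit40 set, a host address of
+ * 0x1000 becomes 0x1000 | BIT_ULL(40) = 0x10000001000 before being
+ * programmed into the GSI, so the transfer is routed over PCIe to host
+ * memory rather than device-local memory.
+ */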
+
+enum ipa3_mhi_polling_mode {
+ IPA_MHI_POLLING_MODE_DB_MODE,
+ IPA_MHI_POLLING_MODE_POLL_MODE,
+};
+
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client)
+{
+ int res;
+ int ipa_ep_idx;
+ struct ipa3_ep_context *ep;
+
+ IPA_MHI_FUNC_ENTRY();
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPA_MHI_ERR("Invalid client.\n");
+		return false;
+	}
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl);
+ res = gsi_stop_channel(ep->gsi_chan_hdl);
+ if (res != 0 &&
+ res != -GSI_STATUS_AGAIN &&
+ res != -GSI_STATUS_TIMED_OUT) {
+ IPA_MHI_ERR("GSI stop channel failed %d\n",
+ res);
+ WARN_ON(1);
+ return false;
+ }
+
+ if (res == 0) {
+ IPA_MHI_DBG_LOW("GSI channel %ld STOP\n",
+ ep->gsi_chan_hdl);
+ return true;
+ }
+
+ return false;
+}
+
+static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client)
+{
+ int res;
+ int clnt_hdl;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ clnt_hdl = ipa3_get_ep_mapping(client);
+ if (clnt_hdl < 0)
+ return -EFAULT;
+
+ res = ipa3_reset_gsi_channel(clnt_hdl);
+ if (res) {
+ IPA_MHI_ERR("ipa3_reset_gsi_channel failed %d\n", res);
+ return -EFAULT;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ res = ipa3_mhi_reset_gsi_channel(client);
+ if (res) {
+ IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
+ ipa_assert();
+ return res;
+ }
+
+ res = ipa3_disable_data_path(ipa3_get_ep_mapping(client));
+ if (res) {
+ IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
+ return res;
+ }
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+}
+
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
+{
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ res = ipa3_enable_data_path(ipa3_get_ep_mapping(client));
+ if (res) {
+ IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
+ return res;
+ }
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+}
+
+static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
+ struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size)
+{
+ switch (ch_ctx_host->pollcfg) {
+ case 0:
+ /*set default polling configuration according to MHI spec*/
+ if (IPA_CLIENT_IS_PROD(client))
+ return 7;
+ else
+ return (ring_size/2)/8;
+ default:
+ return ch_ctx_host->pollcfg;
+ }
+}
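+
+/*
+ * Example (illustrative): for a channel whose client is a consumer, with a
+ * 256-element ring and pollcfg == 0, the default evaluates to
+ * (256 / 2) / 8 = 16 ring elements between polls; producer clients default
+ * to 7 per the MHI spec.
+ */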
+
+static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
+ int ipa_ep_idx, struct start_gsi_channel *params)
+{
+ int res;
+ struct gsi_evt_ring_props ev_props;
+ struct ipa_mhi_msi_info *msi;
+ struct gsi_chan_props ch_props;
+ union __packed gsi_channel_scratch ch_scratch;
+ struct ipa3_ep_context *ep;
+ struct ipa_gsi_ep_config *ep_cfg;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ msi = params->msi;
+ ep_cfg = ipa_get_gsi_ep_info(ipa_ep_idx);
+ if (!ep_cfg) {
+ IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
+ return -EPERM;
+ }
+
+ /* allocate event ring only for the first time pipe is connected */
+ if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
+ memset(&ev_props, 0, sizeof(ev_props));
+ ev_props.intf = GSI_EVT_CHTYPE_MHI_EV;
+ ev_props.intr = GSI_INTR_MSI;
+ ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+ ev_props.ring_len = params->ev_ctx_host->rlen;
+ ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
+ params->ev_ctx_host->rbase);
+ ev_props.int_modt = params->ev_ctx_host->intmodt *
+ IPA_SLEEP_CLK_RATE_KHZ;
+ ev_props.int_modc = params->ev_ctx_host->intmodc;
+ ev_props.intvec = ((msi->data & ~msi->mask) |
+ (params->ev_ctx_host->msivec & msi->mask));
+ ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND(
+ (((u64)msi->addr_hi << 32) | msi->addr_low));
+ ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND(
+ params->event_context_addr +
+ offsetof(struct ipa_mhi_ev_ctx, rp));
+ ev_props.exclusive = true;
+ ev_props.err_cb = params->ev_err_cb;
+ ev_props.user_data = params->channel;
+ ev_props.evchid_valid = true;
+ ev_props.evchid = params->evchid;
+ IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n",
+ ipa_ep_idx, ev_props.evchid);
+ res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl,
+ &ep->gsi_evt_ring_hdl);
+ if (res) {
+ IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
+ goto fail_alloc_evt;
+ }
+ IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
+ client,
+ ep->gsi_evt_ring_hdl);
+ *params->cached_gsi_evt_ring_hdl =
+ ep->gsi_evt_ring_hdl;
+
+ } else {
+ IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n",
+ *params->cached_gsi_evt_ring_hdl);
+ ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
+ }
+
+ memset(&ch_props, 0, sizeof(ch_props));
+ ch_props.prot = GSI_CHAN_PROT_MHI;
+ ch_props.dir = IPA_CLIENT_IS_PROD(client) ?
+ GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
+ ch_props.ch_id = ep_cfg->ipa_gsi_chan_num;
+ ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
+ ch_props.re_size = GSI_CHAN_RE_SIZE_16B;
+ ch_props.ring_len = params->ch_ctx_host->rlen;
+ ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
+ params->ch_ctx_host->rbase);
+ ch_props.use_db_eng = GSI_CHAN_DB_MODE;
+ ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+ ch_props.low_weight = 1;
+ ch_props.err_cb = params->ch_err_cb;
+ ch_props.chan_user_data = params->channel;
+ res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl,
+ &ep->gsi_chan_hdl);
+ if (res) {
+ IPA_MHI_ERR("gsi_alloc_channel failed %d\n",
+ res);
+ goto fail_alloc_ch;
+ }
+
+ memset(&ch_scratch, 0, sizeof(ch_scratch));
+ ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND(
+ params->channel_context_addr +
+ offsetof(struct ipa_mhi_ch_ctx, wp));
+ ch_scratch.mhi.assert_bit40 = params->assert_bit40;
+ ch_scratch.mhi.max_outstanding_tre =
+ ep_cfg->ipa_if_tlv * ch_props.re_size;
+ ch_scratch.mhi.outstanding_threshold =
+ min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
+ ch_scratch.mhi.oob_mod_threshold = 4;
+ if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
+ params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
+ ch_scratch.mhi.burst_mode_enabled = true;
+ ch_scratch.mhi.polling_configuration =
+ ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
+ (ch_props.ring_len / ch_props.re_size));
+ ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
+ } else {
+ ch_scratch.mhi.burst_mode_enabled = false;
+ }
+ res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+ ch_scratch);
+ if (res) {
+ IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n",
+ res);
+ goto fail_ch_scratch;
+ }
+
+ *params->mhi = ch_scratch.mhi;
+
+ IPA_MHI_DBG("Starting channel\n");
+ res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPA_MHI_ERR("gsi_start_channel failed %d\n", res);
+ goto fail_ch_start;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_ch_start:
+fail_ch_scratch:
+ gsi_dealloc_channel(ep->gsi_chan_hdl);
+fail_alloc_ch:
+ gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+ ep->gsi_evt_ring_hdl = ~0;
+fail_alloc_evt:
+ return res;
+}
+
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+ int res;
+ struct gsi_device_scratch gsi_scratch;
+ struct ipa_gsi_ep_config *gsi_ep_info;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!params) {
+ IPA_MHI_ERR("null args\n");
+ return -EINVAL;
+ }
+
+ /* Initialize IPA MHI engine */
+ gsi_ep_info = ipa_get_gsi_ep_info(
+ ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD));
+	if (!gsi_ep_info) {
+		IPAERR("MHI PROD has no ep allocated\n");
+		ipa_assert();
+		return -EFAULT;
+	}
+ memset(&gsi_scratch, 0, sizeof(gsi_scratch));
+ gsi_scratch.mhi_base_chan_idx_valid = true;
+ gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num +
+ params->gsi.first_ch_idx;
+ res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
+ &gsi_scratch);
+ if (res) {
+ IPA_MHI_ERR("failed to write device scratch %d\n", res);
+ goto fail_init_engine;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_init_engine:
+ return res;
+}
+
+/**
+ * ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+ u32 *clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+ int ipa_ep_idx;
+ int res;
+ enum ipa_client_type client;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (!in || !clnt_hdl) {
+ IPA_MHI_ERR("NULL args\n");
+ return -EINVAL;
+ }
+
+ client = in->sys->client;
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPA_MHI_ERR("Invalid client.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (ep->valid == 1) {
+ IPA_MHI_ERR("EP already allocated.\n");
+ return -EPERM;
+ }
+
+ memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+ ep->valid = 1;
+ ep->skip_ep_cfg = in->sys->skip_ep_cfg;
+ ep->client = client;
+ ep->client_notify = in->sys->notify;
+ ep->priv = in->sys->priv;
+ ep->keep_ipa_awake = in->sys->keep_ipa_awake;
+
+ res = ipa_mhi_start_gsi_channel(client,
+ ipa_ep_idx, &in->start.gsi);
+ if (res) {
+ IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
+ res);
+ goto fail_start_channel;
+ }
+
+ res = ipa3_enable_data_path(ipa_ep_idx);
+ if (res) {
+ IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
+ ipa_ep_idx);
+ goto fail_ep_cfg;
+ }
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto fail_ep_cfg;
+ }
+ if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+ IPAERR("fail to configure status of EP.\n");
+ goto fail_ep_cfg;
+ }
+ IPA_MHI_DBG("ep configuration successful\n");
+ } else {
+ IPA_MHI_DBG("skipping ep configuration\n");
+ }
+
+ *clnt_hdl = ipa_ep_idx;
+
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
+ ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+ ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+ IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
+ ipa_ep_idx);
+
+ IPA_MHI_FUNC_EXIT();
+
+ return 0;
+
+fail_ep_cfg:
+ ipa3_disable_data_path(ipa_ep_idx);
+fail_start_channel:
+ memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+ return -EPERM;
+}
+
+/**
+ * ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel reset.
+ * This function is called after MHI channel was started.
+ * This function is doing the following:
+ * - Send command to uC/GSI to reset corresponding MHI channel
+ * - Configure IPA EP control
+ *
+ * Return codes: 0 : success
+ * negative : error
+ */
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl)
+{
+ struct ipa3_ep_context *ep;
+ int res;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
+ IPAERR("invalid handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ if (ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("pipe was not connected %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+ res = gsi_dealloc_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPAERR("gsi_dealloc_channel failed %d\n", res);
+ goto fail_reset_channel;
+ }
+ }
+
+ ep->valid = 0;
+ ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+ IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+
+fail_reset_channel:
+ return res;
+}
+
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+ bool LPTransitionRejected, bool brstmode_enabled,
+ union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+ int res;
+ int ipa_ep_idx;
+ struct ipa3_ep_context *ep;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (brstmode_enabled && !LPTransitionRejected) {
+ /*
+ * set polling mode bit to DB mode before
+ * resuming the channel
+ */
+ res = gsi_write_channel_scratch(
+ ep->gsi_chan_hdl, ch_scratch);
+ if (res) {
+ IPA_MHI_ERR("write ch scratch fail %d\n"
+ , res);
+ return res;
+ }
+ }
+
+ res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPA_MHI_ERR("failed to resume channel error %d\n", res);
+ return res;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+ struct gsi_chan_info *ch_info)
+{
+ int ipa_ep_idx;
+ int res;
+ struct ipa3_ep_context *ep;
+
+ IPA_MHI_FUNC_ENTRY();
+
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
+ if (res) {
+ IPAERR("gsi_query_channel_info failed\n");
+ return res;
+ }
+
+ IPA_MHI_FUNC_EXIT();
+ return 0;
+}
+
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client)
+{
+ u32 aggr_state_active;
+ int ipa_ep_idx;
+
+ aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+ IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
+
+ ipa_ep_idx = ipa_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ ipa_assert();
+ return false;
+ }
+
+ if ((1 << ipa_ep_idx) & aggr_state_active)
+ return true;
+
+ return false;
+}
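+
+/*
+ * Example (illustrative): if IPA_STATE_AGGR_ACTIVE reads 0x9 and the client
+ * maps to endpoint 3, then (1 << 3) & 0x9 == 0x8, so an aggregation frame
+ * is currently open on that endpoint.
+ */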
+
+int ipa3_mhi_destroy_channel(enum ipa_client_type client)
+{
+ int res;
+ int ipa_ep_idx;
+ struct ipa3_ep_context *ep;
+
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
+ ep->gsi_evt_ring_hdl, ipa_ep_idx);
+
+	res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+	if (res) {
+		IPAERR("failed to reset evt ring %lu, err %d\n",
+			ep->gsi_evt_ring_hdl, res);
+		goto fail;
+	}
+
+ IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n",
+ ep->gsi_evt_ring_hdl, ipa_ep_idx);
+
+	res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	if (res) {
+		IPAERR("dealloc evt ring %lu failed, err %d\n",
+			ep->gsi_evt_ring_hdl, res);
+		goto fail;
+	}
+
+ return 0;
+fail:
+ return res;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
new file mode 100644
index 0000000..4b22203
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -0,0 +1,763 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET 0
+#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_TEMP_MEM_SIZE 128
+
+static int ipa3_nat_vma_fault_remap(
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ IPADBG("\n");
+ vmf->page = NULL;
+
+ return VM_FAULT_SIGBUS;
+}
+
+/* VMA related file operations functions */
+static struct vm_operations_struct ipa3_nat_remap_vm_ops = {
+ .fault = ipa3_nat_vma_fault_remap,
+};
+
+static int ipa3_nat_open(struct inode *inode, struct file *filp)
+{
+ struct ipa3_nat_mem *nat_ctx;
+
+ IPADBG("\n");
+ nat_ctx = container_of(inode->i_cdev, struct ipa3_nat_mem, cdev);
+ filp->private_data = nat_ctx;
+ IPADBG("return\n");
+
+ return 0;
+}
+
+static int ipa3_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct ipa3_nat_mem *nat_ctx =
+ (struct ipa3_nat_mem *)filp->private_data;
+ unsigned long phys_addr;
+ int result;
+
+ mutex_lock(&nat_ctx->lock);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("Mapping system memory\n");
+ if (nat_ctx->is_mapped) {
+ IPAERR("mapping already exists, only 1 supported\n");
+ result = -EINVAL;
+ goto bail;
+ }
+ IPADBG("map sz=0x%zx\n", nat_ctx->size);
+ result =
+ dma_mmap_coherent(
+ ipa3_ctx->pdev, vma,
+ nat_ctx->vaddr, nat_ctx->dma_handle,
+ nat_ctx->size);
+
+ if (result) {
+ IPAERR("unable to map memory. Err:%d\n", result);
+ goto bail;
+ }
+ ipa3_ctx->nat_mem.nat_base_address = nat_ctx->vaddr;
+ } else {
+ IPADBG("Mapping shared(local) memory\n");
+ IPADBG("map sz=0x%lx\n", vsize);
+
+ if ((IPA_NAT_PHYS_MEM_SIZE == 0) ||
+ (vsize > IPA_NAT_PHYS_MEM_SIZE)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ phys_addr = ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+ IPA_NAT_PHYS_MEM_OFFSET);
+
+ if (remap_pfn_range(
+ vma, vma->vm_start,
+ phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+ IPAERR("remap failed\n");
+ result = -EAGAIN;
+ goto bail;
+ }
+ ipa3_ctx->nat_mem.nat_base_address = (void *)vma->vm_start;
+ }
+ nat_ctx->is_mapped = true;
+ vma->vm_ops = &ipa3_nat_remap_vm_ops;
+ IPADBG("return\n");
+ result = 0;
+bail:
+ mutex_unlock(&nat_ctx->lock);
+ return result;
+}
+
+static const struct file_operations ipa3_nat_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa3_nat_open,
+ .mmap = ipa3_nat_mmap
+};
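+
+/*
+ * Illustrative userspace sketch (assuming NAT_DEV_NAME expands to
+ * "ipaNatTable"; tbl_size is hypothetical): once allocation has succeeded,
+ * the table is mapped with:
+ *
+ *	int fd = open("/dev/ipaNatTable", O_RDWR);
+ *	void *tbl = mmap(NULL, tbl_size, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, 0);
+ *
+ * The mapping aliases either the DMA-coherent system memory or the IPA
+ * local SRAM window, depending on which allocation path was taken below.
+ */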
+
+/**
+ * ipa3_allocate_temp_nat_memory() - Allocates temporary NAT memory
+ *
+ * The temporary buffer allocated here is used later, during NAT table
+ * delete, as a placeholder table address.
+ */
+void ipa3_allocate_temp_nat_memory(void)
+{
+ struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+
+ nat_ctx->tmp_vaddr =
+ dma_alloc_coherent(ipa3_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE,
+ &nat_ctx->tmp_dma_handle, gfp_flags);
+
+ if (nat_ctx->tmp_vaddr == NULL) {
+ IPAERR("Temp Memory alloc failed\n");
+ nat_ctx->is_tmp_mem = false;
+ return;
+ }
+
+ nat_ctx->is_tmp_mem = true;
+ IPADBG("IPA NAT allocated temp memory successfully\n");
+}
+
+/**
+ * ipa3_create_nat_device() - Create the NAT device
+ *
+ * Called during ipa init to create nat device
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_create_nat_device(void)
+{
+ struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+ int result;
+
+ IPADBG("\n");
+
+ mutex_lock(&nat_ctx->lock);
+ nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME);
+ if (IS_ERR(nat_ctx->class)) {
+ IPAERR("unable to create the class\n");
+ result = -ENODEV;
+ goto vaddr_alloc_fail;
+ }
+ result = alloc_chrdev_region(&nat_ctx->dev_num,
+ 0,
+ 1,
+ NAT_DEV_NAME);
+ if (result) {
+ IPAERR("alloc_chrdev_region err.\n");
+ result = -ENODEV;
+ goto alloc_chrdev_region_fail;
+ }
+
+ nat_ctx->dev =
+ device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+ "%s", NAT_DEV_NAME);
+
+ if (IS_ERR(nat_ctx->dev)) {
+ IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+ result = -ENODEV;
+ goto device_create_fail;
+ }
+
+ cdev_init(&nat_ctx->cdev, &ipa3_nat_fops);
+ nat_ctx->cdev.owner = THIS_MODULE;
+ nat_ctx->cdev.ops = &ipa3_nat_fops;
+
+ result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+ if (result) {
+ IPAERR("cdev_add err=%d\n", -result);
+ goto cdev_add_fail;
+ }
+ IPADBG("ipa nat dev added successful. major:%d minor:%d\n",
+ MAJOR(nat_ctx->dev_num),
+ MINOR(nat_ctx->dev_num));
+
+ nat_ctx->is_dev = true;
+ ipa3_allocate_temp_nat_memory();
+ IPADBG("IPA NAT device created successfully\n");
+ result = 0;
+ goto bail;
+
+cdev_add_fail:
+ device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+ unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+ class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+ if (nat_ctx->vaddr) {
+ IPADBG("Releasing system memory\n");
+ dma_free_coherent(
+ ipa3_ctx->pdev, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->vaddr = NULL;
+ nat_ctx->dma_handle = 0;
+ nat_ctx->size = 0;
+ }
+
+bail:
+ mutex_unlock(&nat_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_allocate_nat_device() - Allocates memory for the NAT device
+ * @mem: [in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+ struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ int result;
+
+ IPADBG("passed memory size %zu\n", mem->size);
+
+ mutex_lock(&nat_ctx->lock);
+ if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
+ IPAERR("Nat device name mismatch\n");
+ IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (nat_ctx->is_dev != true) {
+ IPAERR("Nat device not created successfully during boot up\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (nat_ctx->is_dev_init == true) {
+ IPAERR("Device already init\n");
+ result = 0;
+ goto bail;
+ }
+
+	if (mem->size <= 0) {
+		IPAERR("Invalid Parameters\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+ if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+ IPADBG("Allocating system memory\n");
+ nat_ctx->is_sys_mem = true;
+ nat_ctx->vaddr =
+ dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+ &nat_ctx->dma_handle, gfp_flags);
+ if (nat_ctx->vaddr == NULL) {
+ IPAERR("memory alloc failed\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ nat_ctx->size = mem->size;
+ } else {
+ IPADBG("using shared(local) memory\n");
+ nat_ctx->is_sys_mem = false;
+ }
+
+ nat_ctx->is_dev_init = true;
+ IPADBG("IPA NAT dev init successfully\n");
+ result = 0;
+
+bail:
+ mutex_unlock(&nat_ctx->lock);
+
+ return result;
+}
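+
+/*
+ * Illustrative usage sketch (normally reached via the IPA_IOC_ALLOC_NAT_MEM
+ * ioctl; the 64KB size is hypothetical):
+ *
+ *	struct ipa_ioc_nat_alloc_mem mem;
+ *
+ *	memset(&mem, 0, sizeof(mem));
+ *	strlcpy(mem.dev_name, NAT_DEV_NAME, IPA_RESOURCE_NAME_MAX);
+ *	mem.size = SZ_64K;
+ *	if (ipa3_allocate_nat_device(&mem))
+ *		IPAERR("NAT device allocation failed\n");
+ */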
+
+/* IOCTL function handlers */
+/**
+ * ipa3_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+#define TBL_ENTRY_SIZE 32
+#define INDX_TBL_ENTRY_SIZE 4
+
+ struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+ struct ipa3_desc desc[2];
+ struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+ int result;
+ u32 offset = 0;
+ size_t tmp;
+
+ IPADBG("\n");
+ if (init->table_entries == 0) {
+ IPADBG("Table entries is zero\n");
+ return -EPERM;
+ }
+
+ /* check for integer overflow */
+ if (init->ipv4_rules_offset >
+ UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+ IPAERR("Detected overflow\n");
+ return -EPERM;
+ }
+ /* Check Table Entry offset is not
+ * beyond allocated size
+ */
+ tmp = init->ipv4_rules_offset +
+ (TBL_ENTRY_SIZE * (init->table_entries + 1));
+ if (tmp > ipa3_ctx->nat_mem.size) {
+ IPAERR("Table rules offset not valid\n");
+ IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ init->ipv4_rules_offset, (init->table_entries + 1),
+ tmp, ipa3_ctx->nat_mem.size);
+ return -EPERM;
+ }
+
+ /* check for integer overflow */
+ if (init->expn_rules_offset >
+ UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
+ IPAERR("Detected overflow\n");
+ return -EPERM;
+ }
+ /* Check Expn Table Entry offset is not
+ * beyond allocated size
+ */
+ tmp = init->expn_rules_offset +
+ (TBL_ENTRY_SIZE * init->expn_table_entries);
+ if (tmp > ipa3_ctx->nat_mem.size) {
+ IPAERR("Expn Table rules offset not valid\n");
+ IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ init->expn_rules_offset, init->expn_table_entries,
+ tmp, ipa3_ctx->nat_mem.size);
+ return -EPERM;
+ }
+
+ /* check for integer overflow */
+ if (init->index_offset >
+ UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+ IPAERR("Detected overflow\n");
+ return -EPERM;
+ }
+ /* Check Indx Table Entry offset is not
+ * beyond allocated size
+ */
+ tmp = init->index_offset +
+ (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
+ if (tmp > ipa3_ctx->nat_mem.size) {
+ IPAERR("Indx Table rules offset not valid\n");
+ IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ init->index_offset, (init->table_entries + 1),
+ tmp, ipa3_ctx->nat_mem.size);
+ return -EPERM;
+ }
+
+ /* check for integer overflow */
+ if (init->index_expn_offset >
+ UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
+ IPAERR("Detected overflow\n");
+ return -EPERM;
+ }
+ /* Check Expn Table entry offset is not
+ * beyond allocated size
+ */
+ tmp = init->index_expn_offset +
+ (INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
+ if (tmp > ipa3_ctx->nat_mem.size) {
+ IPAERR("Indx Expn Table rules offset not valid\n");
+ IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ init->index_expn_offset, init->expn_table_entries,
+ tmp, ipa3_ctx->nat_mem.size);
+ return -EPERM;
+ }
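+
+	/*
+	 * Note: the four guards above share one pattern: before computing
+	 * "offset + len", verify "offset <= UINT_MAX - len" so the 32-bit
+	 * sum cannot wrap. For example, offset = 0xFFFFFFF0 with len = 0x20
+	 * would naively wrap to 0x10 and pass the size check, but is
+	 * rejected here since 0xFFFFFFF0 > UINT_MAX - 0x20 (0xFFFFFFDF).
+	 */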
+
+ memset(&desc, 0, sizeof(desc));
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ nop_cmd_pyld =
+ ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ if (!nop_cmd_pyld) {
+ IPAERR("failed to construct NOP imm cmd\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].callback = NULL;
+ desc[0].user1 = NULL;
+ desc[0].user2 = 0;
+ desc[0].pyld = nop_cmd_pyld->data;
+ desc[0].len = nop_cmd_pyld->len;
+
+ if (ipa3_ctx->nat_mem.vaddr) {
+ IPADBG("using system memory for nat table\n");
+ cmd.ipv4_rules_addr_shared = false;
+ cmd.ipv4_expansion_rules_addr_shared = false;
+ cmd.index_table_addr_shared = false;
+ cmd.index_table_expansion_addr_shared = false;
+
+ offset = UINT_MAX - ipa3_ctx->nat_mem.dma_handle;
+
+ if ((init->ipv4_rules_offset > offset) ||
+ (init->expn_rules_offset > offset) ||
+ (init->index_offset > offset) ||
+ (init->index_expn_offset > offset)) {
+ IPAERR("Failed due to integer overflow\n");
+ IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ &ipa3_ctx->nat_mem.dma_handle);
+ IPAERR("ipv4_rules_offset: 0x%x\n",
+ init->ipv4_rules_offset);
+ IPAERR("expn_rules_offset: 0x%x\n",
+ init->expn_rules_offset);
+ IPAERR("index_offset: 0x%x\n",
+ init->index_offset);
+ IPAERR("index_expn_offset: 0x%x\n",
+ init->index_expn_offset);
+ result = -EPERM;
+ goto free_nop;
+ }
+ cmd.ipv4_rules_addr =
+ ipa3_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+ IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+ cmd.ipv4_expansion_rules_addr =
+ ipa3_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+ IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+ cmd.index_table_addr =
+ ipa3_ctx->nat_mem.dma_handle + init->index_offset;
+ IPADBG("index_offset:0x%x\n", init->index_offset);
+
+ cmd.index_table_expansion_addr =
+ ipa3_ctx->nat_mem.dma_handle + init->index_expn_offset;
+ IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+ } else {
+ IPADBG("using shared(local) memory for nat table\n");
+ cmd.ipv4_rules_addr_shared = true;
+ cmd.ipv4_expansion_rules_addr_shared = true;
+ cmd.index_table_addr_shared = true;
+ cmd.index_table_expansion_addr_shared = true;
+
+ cmd.ipv4_rules_addr = init->ipv4_rules_offset +
+ IPA_RAM_NAT_OFST;
+
+ cmd.ipv4_expansion_rules_addr = init->expn_rules_offset +
+ IPA_RAM_NAT_OFST;
+
+ cmd.index_table_addr = init->index_offset +
+ IPA_RAM_NAT_OFST;
+
+ cmd.index_table_expansion_addr = init->index_expn_offset +
+ IPA_RAM_NAT_OFST;
+ }
+ cmd.table_index = init->tbl_index;
+ IPADBG("Table index:0x%x\n", cmd.table_index);
+ cmd.size_base_tables = init->table_entries;
+ IPADBG("Base Table size:0x%x\n", cmd.size_base_tables);
+ cmd.size_expansion_tables = init->expn_table_entries;
+ IPADBG("Expansion Table size:0x%x\n", cmd.size_expansion_tables);
+ cmd.public_ip_addr = init->ip_addr;
+ IPADBG("Public ip address:0x%x\n", cmd.public_ip_addr);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ result = -EPERM;
+ goto free_nop;
+ }
+
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+ desc[1].type = IPA_IMM_CMD_DESC;
+ desc[1].callback = NULL;
+ desc[1].user1 = NULL;
+ desc[1].user2 = 0;
+ desc[1].pyld = cmd_pyld->data;
+ desc[1].len = cmd_pyld->len;
+ IPADBG("posting v4 init command\n");
+ if (ipa3_send_cmd(2, desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto destroy_imm_cmd;
+ }
+
+ ipa3_ctx->nat_mem.public_ip_addr = init->ip_addr;
+ IPADBG("Table ip address:0x%x", ipa3_ctx->nat_mem.public_ip_addr);
+
+ ipa3_ctx->nat_mem.ipv4_rules_addr =
+ (char *)ipa3_ctx->nat_mem.nat_base_address + init->ipv4_rules_offset;
+ IPADBG("ipv4_rules_addr: 0x%p\n",
+ ipa3_ctx->nat_mem.ipv4_rules_addr);
+
+ ipa3_ctx->nat_mem.ipv4_expansion_rules_addr =
+ (char *)ipa3_ctx->nat_mem.nat_base_address + init->expn_rules_offset;
+ IPADBG("ipv4_expansion_rules_addr: 0x%p\n",
+ ipa3_ctx->nat_mem.ipv4_expansion_rules_addr);
+
+ ipa3_ctx->nat_mem.index_table_addr =
+ (char *)ipa3_ctx->nat_mem.nat_base_address +
+ init->index_offset;
+ IPADBG("index_table_addr: 0x%p\n",
+ ipa3_ctx->nat_mem.index_table_addr);
+
+ ipa3_ctx->nat_mem.index_table_expansion_addr =
+ (char *)ipa3_ctx->nat_mem.nat_base_address + init->index_expn_offset;
+ IPADBG("index_table_expansion_addr: 0x%p\n",
+ ipa3_ctx->nat_mem.index_table_expansion_addr);
+
+ IPADBG("size_base_tables: %d\n", init->table_entries);
+ ipa3_ctx->nat_mem.size_base_tables = init->table_entries;
+
+ IPADBG("size_expansion_tables: %d\n", init->expn_table_entries);
+ ipa3_ctx->nat_mem.size_expansion_tables = init->expn_table_entries;
+
+ IPADBG("return\n");
+ result = 0;
+destroy_imm_cmd:
+ ipahal_destroy_imm_cmd(cmd_pyld);
+free_nop:
+ ipahal_destroy_imm_cmd(nop_cmd_pyld);
+bail:
+ return result;
+}
+
+/**
+ * ipa3_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+#define NUM_OF_DESC 2
+
+ struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+ struct ipahal_imm_cmd_nat_dma cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+ struct ipa3_desc *desc = NULL;
+ u16 size = 0, cnt = 0;
+ int ret = 0;
+
+ IPADBG("\n");
+ if (dma->entries <= 0) {
+ IPAERR("Invalid number of commands %d\n",
+ dma->entries);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ size = sizeof(struct ipa3_desc) * NUM_OF_DESC;
+ desc = kzalloc(size, GFP_KERNEL);
+ if (desc == NULL) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ nop_cmd_pyld =
+ ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ if (!nop_cmd_pyld) {
+ IPAERR("Failed to construct NOP imm cmd\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[0].callback = NULL;
+ desc[0].user1 = NULL;
+ desc[0].user2 = 0;
+ desc[0].pyld = nop_cmd_pyld->data;
+ desc[0].len = nop_cmd_pyld->len;
+
+ for (cnt = 0; cnt < dma->entries; cnt++) {
+ cmd.table_index = dma->dma[cnt].table_index;
+ cmd.base_addr = dma->dma[cnt].base_addr;
+ cmd.offset = dma->dma[cnt].offset;
+ cmd.data = dma->dma[cnt].data;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_NAT_DMA, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("Fail to construct nat_dma imm cmd\n");
+ continue;
+ }
+ desc[1].type = IPA_IMM_CMD_DESC;
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_NAT_DMA);
+ desc[1].callback = NULL;
+ desc[1].user1 = NULL;
+ desc[1].user2 = 0;
+ desc[1].pyld = cmd_pyld->data;
+ desc[1].len = cmd_pyld->len;
+
+ ret = ipa3_send_cmd(NUM_OF_DESC, desc);
+ if (ret == -EPERM)
+ IPAERR("Fail to send immediate command %d\n", cnt);
+ ipahal_destroy_imm_cmd(cmd_pyld);
+ }
+
+bail:
+	kfree(desc);
+
+ if (nop_cmd_pyld != NULL)
+ ipahal_destroy_imm_cmd(nop_cmd_pyld);
+
+ return ret;
+}
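+
+/*
+ * Illustrative usage sketch (field values, including the table selector in
+ * base_addr, are hypothetical):
+ *
+ *	struct ipa_ioc_nat_dma_cmd *cmd;
+ *
+ *	cmd = kzalloc(sizeof(*cmd) + sizeof(struct ipa_ioc_nat_dma_one),
+ *		      GFP_KERNEL);
+ *	cmd->entries = 1;
+ *	cmd->dma[0].table_index = 0;
+ *	cmd->dma[0].base_addr = 0;	// hypothetical table selector
+ *	cmd->dma[0].offset = 0x40;	// entry offset within the table
+ *	cmd->dma[0].data = 0x1;		// value to write
+ *	if (ipa3_nat_dma_cmd(cmd))
+ *		IPAERR("NAT DMA failed\n");
+ *	kfree(cmd);
+ */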
+
+/**
+ * ipa3_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx: [in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa3_nat_free_mem_and_device(struct ipa3_nat_mem *nat_ctx)
+{
+ IPADBG("\n");
+ mutex_lock(&nat_ctx->lock);
+
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("freeing the dma memory\n");
+ dma_free_coherent(
+ ipa3_ctx->pdev, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->size = 0;
+ nat_ctx->vaddr = NULL;
+ }
+ nat_ctx->is_mapped = false;
+ nat_ctx->is_sys_mem = false;
+ nat_ctx->is_dev_init = false;
+
+ mutex_unlock(&nat_ctx->lock);
+ IPADBG("return\n");
+}
+
+/**
+ * ipa3_nat_del_cmd() - Delete a NAT table
+ * @del: [in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+ struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+ struct ipa3_desc desc[2];
+ struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ bool mem_type_shared = true;
+ u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+ int result;
+
+ IPADBG("\n");
+ if (ipa3_ctx->nat_mem.is_tmp_mem) {
+ IPAERR("using temp memory during nat del\n");
+ mem_type_shared = false;
+ base_addr = ipa3_ctx->nat_mem.tmp_dma_handle;
+ }
+
+ if (del->public_ip_addr == 0) {
+ IPADBG("Bad Parameter\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ memset(&desc, 0, sizeof(desc));
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ nop_cmd_pyld =
+ ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ if (!nop_cmd_pyld) {
+ IPAERR("Failed to construct NOP imm cmd\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[0].callback = NULL;
+ desc[0].user1 = NULL;
+ desc[0].user2 = 0;
+ desc[0].pyld = nop_cmd_pyld->data;
+ desc[0].len = nop_cmd_pyld->len;
+
+ cmd.table_index = del->table_index;
+ cmd.ipv4_rules_addr = base_addr;
+ cmd.ipv4_rules_addr_shared = mem_type_shared;
+ cmd.ipv4_expansion_rules_addr = base_addr;
+ cmd.ipv4_expansion_rules_addr_shared = mem_type_shared;
+ cmd.index_table_addr = base_addr;
+ cmd.index_table_addr_shared = mem_type_shared;
+ cmd.index_table_expansion_addr = base_addr;
+ cmd.index_table_expansion_addr_shared = mem_type_shared;
+ cmd.size_base_tables = 0;
+ cmd.size_expansion_tables = 0;
+ cmd.public_ip_addr = 0;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ result = -EPERM;
+ goto destroy_regwrt_imm_cmd;
+ }
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+ desc[1].type = IPA_IMM_CMD_DESC;
+ desc[1].callback = NULL;
+ desc[1].user1 = NULL;
+ desc[1].user2 = 0;
+ desc[1].pyld = cmd_pyld->data;
+ desc[1].len = cmd_pyld->len;
+
+ if (ipa3_send_cmd(2, desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto destroy_imm_cmd;
+ }
+
+ ipa3_ctx->nat_mem.size_base_tables = 0;
+ ipa3_ctx->nat_mem.size_expansion_tables = 0;
+ ipa3_ctx->nat_mem.public_ip_addr = 0;
+ ipa3_ctx->nat_mem.ipv4_rules_addr = 0;
+ ipa3_ctx->nat_mem.ipv4_expansion_rules_addr = 0;
+ ipa3_ctx->nat_mem.index_table_addr = 0;
+ ipa3_ctx->nat_mem.index_table_expansion_addr = 0;
+
+ ipa3_nat_free_mem_and_device(&ipa3_ctx->nat_mem);
+ IPADBG("return\n");
+ result = 0;
+
+destroy_imm_cmd:
+ ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_regwrt_imm_cmd:
+ ipahal_destroy_imm_cmd(nop_cmd_pyld);
+bail:
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
new file mode 100644
index 0000000..719eb2d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -0,0 +1,1268 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/qmi_encdec.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/ipa.h>
+#include <linux/vmalloc.h>
+
+#include "ipa_qmi_service.h"
+
+#define IPA_Q6_SVC_VERS 1
+#define IPA_A5_SVC_VERS 1
+#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ)
+
+#define IPA_A5_SERVICE_SVC_ID 0x31
+#define IPA_A5_SERVICE_INS_ID 1
+#define IPA_Q6_SERVICE_SVC_ID 0x31
+#define IPA_Q6_SERVICE_INS_ID 2
+
+#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
+#define QMI_SEND_REQ_TIMEOUT_MS 60000
+
+static struct qmi_handle *ipa3_svc_handle;
+static void ipa3_a5_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, ipa3_a5_svc_recv_msg);
+static struct workqueue_struct *ipa_svc_workqueue;
+static struct workqueue_struct *ipa_clnt_req_workqueue;
+static struct workqueue_struct *ipa_clnt_resp_workqueue;
+static void *curr_conn;
+static bool ipa3_qmi_modem_init_fin, ipa3_qmi_indication_fin;
+static struct work_struct ipa3_qmi_service_init_work;
+static uint32_t ipa_wan_platform;
+struct ipa3_qmi_context *ipa3_qmi_ctx;
+static bool workqueues_stopped;
+static bool ipa3_modem_init_cmplt;
+static bool first_time_handshake;
+/* QMI A5 service */
+
+static struct msg_desc ipa3_indication_reg_req_desc = {
+ .max_msg_len = QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01,
+ .ei_array = ipa3_indication_reg_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_indication_reg_resp_desc = {
+ .max_msg_len = QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INDICATION_REGISTER_RESP_V01,
+ .ei_array = ipa3_indication_reg_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_master_driver_complete_indication_desc = {
+ .max_msg_len = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01,
+ .ei_array = ipa3_master_driver_init_complt_ind_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_install_fltr_rule_req_desc = {
+ .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01,
+ .ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_install_fltr_rule_resp_desc = {
+ .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01,
+ .ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_filter_installed_notif_req_desc = {
+ .max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01,
+ .ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_filter_installed_notif_resp_desc = {
+ .max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01,
+ .ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_config_req_desc = {
+ .max_msg_len = QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_CONFIG_REQ_V01,
+ .ei_array = ipa3_config_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_config_resp_desc = {
+ .max_msg_len = QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_CONFIG_RESP_V01,
+ .ei_array = ipa3_config_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc ipa3_init_modem_driver_cmplt_req_desc = {
+ .max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01,
+ .ei_array = ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei,
+};
+
+static struct msg_desc ipa3_init_modem_driver_cmplt_resp_desc = {
+ .max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01,
+ .msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01,
+ .ei_array = ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei,
+};
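+
+/*
+ * Each msg_desc above binds a QMI message ID to its maximum encoded length
+ * and its qmi_encdec element-info array; the QMI core uses the descriptor
+ * to marshal the C struct on the wire, e.g. (mirroring the handlers below):
+ *
+ *	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+ *				   &ipa3_indication_reg_resp_desc,
+ *				   &resp, sizeof(resp));
+ */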
+
+static int ipa3_handle_indication_req(void *req_h, void *req)
+{
+ struct ipa_indication_reg_req_msg_v01 *indication_req;
+ struct ipa_indication_reg_resp_msg_v01 resp;
+ struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+ int rc;
+
+ indication_req = (struct ipa_indication_reg_req_msg_v01 *)req;
+ IPAWANDBG("Received INDICATION Request\n");
+
+ memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01));
+ resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+ rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+ &ipa3_indication_reg_resp_desc, &resp, sizeof(resp));
+ ipa3_qmi_indication_fin = true;
+	/* check whether the indication needs to be sent to the modem */
+ if (ipa3_qmi_modem_init_fin) {
+ IPAWANDBG("send indication to modem (%d)\n",
+ ipa3_qmi_modem_init_fin);
+ memset(&ind, 0, sizeof(struct
+ ipa_master_driver_init_complt_ind_msg_v01));
+ ind.master_driver_init_status.result =
+ IPA_QMI_RESULT_SUCCESS_V01;
+ rc = qmi_send_ind_from_cb(ipa3_svc_handle, curr_conn,
+ &ipa3_master_driver_complete_indication_desc,
+ &ind,
+ sizeof(ind));
+ } else {
+ IPAWANERR("not send indication\n");
+ }
+ return rc;
+}
+
+
+static int ipa3_handle_install_filter_rule_req(void *req_h, void *req)
+{
+ struct ipa_install_fltr_rule_req_msg_v01 *rule_req;
+ struct ipa_install_fltr_rule_resp_msg_v01 resp;
+ uint32_t rule_hdl[MAX_NUM_Q6_RULE];
+ int rc = 0, i;
+
+ rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)req;
+ memset(rule_hdl, 0, sizeof(rule_hdl));
+ memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+ IPAWANDBG("Received install filter Request\n");
+
+	rc = ipa3_copy_ul_filter_rule_to_ipa((struct
+		ipa_install_fltr_rule_req_msg_v01 *)req);
+	if (rc)
+		IPAWANERR("failed to copy UL rules from modem\n");
+
+ resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+ if (rule_req->filter_spec_ex_list_valid == true) {
+ resp.rule_id_valid = 1;
+ if (rule_req->filter_spec_ex_list_len > MAX_NUM_Q6_RULE) {
+ resp.rule_id_len = MAX_NUM_Q6_RULE;
+ IPAWANERR("installed (%d) max Q6-UL rules ",
+ MAX_NUM_Q6_RULE);
+ IPAWANERR("but modem gives total (%u)\n",
+ rule_req->filter_spec_ex_list_len);
+ } else {
+ resp.rule_id_len =
+ rule_req->filter_spec_ex_list_len;
+ }
+ } else {
+ resp.rule_id_valid = 0;
+ resp.rule_id_len = 0;
+ }
+
+ /* construct UL filter rules response to Modem*/
+ for (i = 0; i < resp.rule_id_len; i++) {
+ resp.rule_id[i] =
+ rule_req->filter_spec_ex_list[i].rule_id;
+ }
+
+ rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+ &ipa3_install_fltr_rule_resp_desc, &resp, sizeof(resp));
+
+ IPAWANDBG("Replied to install filter request\n");
+ return rc;
+}
+
+static int ipa3_handle_filter_installed_notify_req(void *req_h, void *req)
+{
+ struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+ int rc = 0;
+
+ memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+ IPAWANDBG("Received filter_install_notify Request\n");
+ resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+ rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+ &ipa3_filter_installed_notif_resp_desc,
+ &resp, sizeof(resp));
+
+ IPAWANDBG("Responsed filter_install_notify Request\n");
+ return rc;
+}
+
+static int handle_ipa_config_req(void *req_h, void *req)
+{
+ struct ipa_config_resp_msg_v01 resp;
+ int rc;
+
+ memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01));
+ resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+ IPAWANDBG("Received IPA CONFIG Request\n");
+ rc = ipa_mhi_handle_ipa_config_req(
+ (struct ipa_config_req_msg_v01 *)req);
+ if (rc) {
+ IPAERR("ipa3_mhi_handle_ipa_config_req failed %d\n", rc);
+ resp.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+ }
+ rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+ &ipa3_config_resp_desc,
+ &resp, sizeof(resp));
+ IPAWANDBG("Responsed IPA CONFIG Request\n");
+ return rc;
+}
+
+static int ipa3_handle_modem_init_cmplt_req(void *req_h, void *req)
+{
+ struct ipa_init_modem_driver_cmplt_req_msg_v01 *cmplt_req;
+ struct ipa_init_modem_driver_cmplt_resp_msg_v01 resp;
+ int rc;
+
+ IPAWANDBG("Received QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01\n");
+ cmplt_req = (struct ipa_init_modem_driver_cmplt_req_msg_v01 *)req;
+
+ if (ipa3_modem_init_cmplt == false) {
+ ipa3_modem_init_cmplt = true;
+ if (ipa3_qmi_modem_init_fin == true) {
+ IPAWANDBG("load uc related registers (%d)\n",
+ ipa3_qmi_modem_init_fin);
+ ipa3_uc_load_notify();
+ }
+ }
+
+ memset(&resp, 0, sizeof(resp));
+ resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+ rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+ &ipa3_init_modem_driver_cmplt_resp_desc,
+ &resp, sizeof(resp));
+
+ IPAWANDBG("Sent QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01\n");
+ return rc;
+}
+
+static int ipa3_a5_svc_connect_cb(struct qmi_handle *handle,
+ void *conn_h)
+{
+ if (ipa3_svc_handle != handle || !conn_h)
+ return -EINVAL;
+
+ if (curr_conn) {
+ IPAWANERR("Service is busy\n");
+ return -ECONNREFUSED;
+ }
+ curr_conn = conn_h;
+ return 0;
+}
+
+static int ipa3_a5_svc_disconnect_cb(struct qmi_handle *handle,
+ void *conn_h)
+{
+ if (ipa3_svc_handle != handle || curr_conn != conn_h)
+ return -EINVAL;
+
+ curr_conn = NULL;
+ return 0;
+}
+
+static int ipa3_a5_svc_req_desc_cb(unsigned int msg_id,
+ struct msg_desc **req_desc)
+{
+ int rc;
+
+ switch (msg_id) {
+ case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+ *req_desc = &ipa3_indication_reg_req_desc;
+ rc = sizeof(struct ipa_indication_reg_req_msg_v01);
+ break;
+
+ case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+ *req_desc = &ipa3_install_fltr_rule_req_desc;
+ rc = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+ break;
+ case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+ *req_desc = &ipa3_filter_installed_notif_req_desc;
+ rc = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+ break;
+ case QMI_IPA_CONFIG_REQ_V01:
+ *req_desc = &ipa3_config_req_desc;
+ rc = sizeof(struct ipa_config_req_msg_v01);
+ break;
+ case QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01:
+ *req_desc = &ipa3_init_modem_driver_cmplt_req_desc;
+ rc = sizeof(struct ipa_init_modem_driver_cmplt_req_msg_v01);
+ break;
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+ return rc;
+}
+
+static int ipa3_a5_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+ void *req_h, unsigned int msg_id, void *req)
+{
+ int rc;
+
+ if (ipa3_svc_handle != handle || curr_conn != conn_h)
+ return -EINVAL;
+
+ switch (msg_id) {
+ case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+ rc = ipa3_handle_indication_req(req_h, req);
+ break;
+ case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+ rc = ipa3_handle_install_filter_rule_req(req_h, req);
+ rc = ipa3_wwan_update_mux_channel_prop();
+ break;
+ case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+ rc = ipa3_handle_filter_installed_notify_req(req_h, req);
+ break;
+ case QMI_IPA_CONFIG_REQ_V01:
+ rc = handle_ipa_config_req(req_h, req);
+ break;
+ case QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01:
+ rc = ipa3_handle_modem_init_cmplt_req(req_h, req);
+ break;
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+ return rc;
+}
+
+static void ipa3_a5_svc_recv_msg(struct work_struct *work)
+{
+ int rc;
+
+ do {
+ IPAWANDBG_LOW("Notified about a Receive Event");
+ rc = qmi_recv_msg(ipa3_svc_handle);
+ } while (rc == 0);
+ if (rc != -ENOMSG)
+ IPAWANERR("Error receiving message\n");
+}
+
+static void qmi_ipa_a5_svc_ntfy(struct qmi_handle *handle,
+ enum qmi_event_type event, void *priv)
+{
+ switch (event) {
+ case QMI_RECV_MSG:
+ if (!workqueues_stopped)
+ queue_delayed_work(ipa_svc_workqueue,
+ &work_recv_msg, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct qmi_svc_ops_options ipa3_a5_svc_ops_options = {
+ .version = 1,
+ .service_id = IPA_A5_SERVICE_SVC_ID,
+ .service_vers = IPA_A5_SVC_VERS,
+ .service_ins = IPA_A5_SERVICE_INS_ID,
+ .connect_cb = ipa3_a5_svc_connect_cb,
+ .disconnect_cb = ipa3_a5_svc_disconnect_cb,
+ .req_desc_cb = ipa3_a5_svc_req_desc_cb,
+ .req_cb = ipa3_a5_svc_req_cb,
+};
+
+
+/****************************************************/
+/* QMI A5 client ->Q6 */
+/****************************************************/
+static void ipa3_q6_clnt_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_recv_msg_client, ipa3_q6_clnt_recv_msg);
+static void ipa3_q6_clnt_svc_arrive(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_svc_arrive, ipa3_q6_clnt_svc_arrive);
+static void ipa3_q6_clnt_svc_exit(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_svc_exit, ipa3_q6_clnt_svc_exit);
+/* Test client port for IPC Router */
+static struct qmi_handle *ipa_q6_clnt;
+static int ipa_q6_clnt_reset;
+
+static int ipa3_check_qmi_response(int rc,
+ int req_id,
+ enum ipa_qmi_result_type_v01 result,
+ enum ipa_qmi_error_type_v01 error,
+ char *resp_type)
+{
+ if (rc < 0) {
+ if (rc == -ETIMEDOUT && ipa3_rmnet_ctx.ipa_rmnet_ssr) {
+ IPAWANERR(
+ "Timeout for qmi request id %d\n", req_id);
+ return rc;
+ }
+ if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+ IPAWANERR(
+ "SSR while waiting for qmi request id %d\n", req_id);
+ return rc;
+ }
+ IPAWANERR("Error sending qmi request id %d, rc = %d\n",
+ req_id, rc);
+ return rc;
+ }
+ if (result != IPA_QMI_RESULT_SUCCESS_V01 &&
+ ipa3_rmnet_ctx.ipa_rmnet_ssr) {
+		IPAWANERR(
+		"Got bad response %d from request id %d (error %d)\n",
+		result, req_id, error);
+ return result;
+ }
+ IPAWANDBG_LOW("Received %s successfully\n", resp_type);
+ return 0;
+}
+
+static int ipa3_qmi_init_modem_send_sync_msg(void)
+{
+ struct ipa_init_modem_driver_req_msg_v01 req;
+ struct ipa_init_modem_driver_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+ u16 smem_restr_bytes = ipa3_get_smem_restr_bytes();
+
+ memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01));
+ memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01));
+
+ req.platform_type_valid = true;
+ req.platform_type = ipa_wan_platform;
+
+ req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0);
+ req.hdr_tbl_info.modem_offset_start =
+ IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes;
+ req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) +
+ smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1;
+
+ req.v4_route_tbl_info_valid = true;
+ req.v4_route_tbl_info.route_tbl_start_addr =
+ IPA_MEM_PART(v4_rt_nhash_ofst) + smem_restr_bytes;
+ req.v4_route_tbl_info.num_indices =
+ IPA_MEM_PART(v4_modem_rt_index_hi);
+ req.v6_route_tbl_info_valid = true;
+
+ req.v6_route_tbl_info.route_tbl_start_addr =
+ IPA_MEM_PART(v6_rt_nhash_ofst) + smem_restr_bytes;
+ req.v6_route_tbl_info.num_indices =
+ IPA_MEM_PART(v6_modem_rt_index_hi);
+
+ req.v4_filter_tbl_start_addr_valid = true;
+ req.v4_filter_tbl_start_addr =
+ IPA_MEM_PART(v4_flt_nhash_ofst) + smem_restr_bytes;
+
+ req.v6_filter_tbl_start_addr_valid = true;
+ req.v6_filter_tbl_start_addr =
+ IPA_MEM_PART(v6_flt_nhash_ofst) + smem_restr_bytes;
+
+ req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0);
+ req.modem_mem_info.block_start_addr =
+ IPA_MEM_PART(modem_ofst) + smem_restr_bytes;
+ req.modem_mem_info.size = IPA_MEM_PART(modem_size);
+
+ req.ctrl_comm_dest_end_pt_valid = true;
+ req.ctrl_comm_dest_end_pt =
+ ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+
+ req.hdr_proc_ctx_tbl_info_valid =
+ (IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0);
+ req.hdr_proc_ctx_tbl_info.modem_offset_start =
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes;
+ req.hdr_proc_ctx_tbl_info.modem_offset_end =
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst) +
+ IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1;
+
+ req.zip_tbl_info_valid = (IPA_MEM_PART(modem_comp_decomp_size) != 0);
+	req.zip_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_comp_decomp_ofst) + smem_restr_bytes;
+ req.zip_tbl_info.modem_offset_end =
+ IPA_MEM_PART(modem_comp_decomp_ofst) +
+ IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1;
+
+ req.v4_hash_route_tbl_info_valid = true;
+ req.v4_hash_route_tbl_info.route_tbl_start_addr =
+ IPA_MEM_PART(v4_rt_hash_ofst) + smem_restr_bytes;
+ req.v4_hash_route_tbl_info.num_indices =
+ IPA_MEM_PART(v4_modem_rt_index_hi);
+
+ req.v6_hash_route_tbl_info_valid = true;
+ req.v6_hash_route_tbl_info.route_tbl_start_addr =
+ IPA_MEM_PART(v6_rt_hash_ofst) + smem_restr_bytes;
+ req.v6_hash_route_tbl_info.num_indices =
+ IPA_MEM_PART(v6_modem_rt_index_hi);
+
+ req.v4_hash_filter_tbl_start_addr_valid = true;
+ req.v4_hash_filter_tbl_start_addr =
+ IPA_MEM_PART(v4_flt_hash_ofst) + smem_restr_bytes;
+
+ req.v6_hash_filter_tbl_start_addr_valid = true;
+ req.v6_hash_filter_tbl_start_addr =
+ IPA_MEM_PART(v6_flt_hash_ofst) + smem_restr_bytes;
+
+ if (!ipa3_uc_loaded_check()) { /* First time boot */
+ req.is_ssr_bootup_valid = false;
+ req.is_ssr_bootup = 0;
+ } else { /* After SSR boot */
+ req.is_ssr_bootup_valid = true;
+ req.is_ssr_bootup = 1;
+ }
+
+ IPAWANDBG("platform_type %d\n", req.platform_type);
+ IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n",
+ req.hdr_tbl_info.modem_offset_start);
+ IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n",
+ req.hdr_tbl_info.modem_offset_end);
+ IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n",
+ req.v4_route_tbl_info.route_tbl_start_addr);
+ IPAWANDBG("v4_route_tbl_info.num_indices %d\n",
+ req.v4_route_tbl_info.num_indices);
+ IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n",
+ req.v6_route_tbl_info.route_tbl_start_addr);
+ IPAWANDBG("v6_route_tbl_info.num_indices %d\n",
+ req.v6_route_tbl_info.num_indices);
+ IPAWANDBG("v4_filter_tbl_start_addr %d\n",
+ req.v4_filter_tbl_start_addr);
+ IPAWANDBG("v6_filter_tbl_start_addr %d\n",
+ req.v6_filter_tbl_start_addr);
+ IPAWANDBG("modem_mem_info.block_start_addr %d\n",
+ req.modem_mem_info.block_start_addr);
+ IPAWANDBG("modem_mem_info.size %d\n",
+ req.modem_mem_info.size);
+ IPAWANDBG("ctrl_comm_dest_end_pt %d\n",
+ req.ctrl_comm_dest_end_pt);
+ IPAWANDBG("is_ssr_bootup %d\n",
+ req.is_ssr_bootup);
+ IPAWANDBG("v4_hash_route_tbl_info.route_tbl_start_addr %d\n",
+ req.v4_hash_route_tbl_info.route_tbl_start_addr);
+ IPAWANDBG("v4_hash_route_tbl_info.num_indices %d\n",
+ req.v4_hash_route_tbl_info.num_indices);
+ IPAWANDBG("v6_hash_route_tbl_info.route_tbl_start_addr %d\n",
+ req.v6_hash_route_tbl_info.route_tbl_start_addr);
+ IPAWANDBG("v6_hash_route_tbl_info.num_indices %d\n",
+ req.v6_hash_route_tbl_info.num_indices);
+ IPAWANDBG("v4_hash_filter_tbl_start_addr %d\n",
+ req.v4_hash_filter_tbl_start_addr);
+ IPAWANDBG("v6_hash_filter_tbl_start_addr %d\n",
+ req.v6_hash_filter_tbl_start_addr);
+
+ req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01;
+ req_desc.ei_array = ipa3_init_modem_driver_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01;
+ resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei;
+
+ pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_REQ_TIMEOUT_MS);
+ pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n");
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_init_modem_driver_resp_msg_v01");
+}
+
+/* sending filter-install-request to modem*/
+int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+ struct ipa_install_fltr_rule_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+	/* check if the filter rules from IPACM are valid */
+	if (req->filter_spec_ex_list_len == 0) {
+		IPAWANDBG("IPACM passed zero rules to Q6\n");
+	} else {
+		IPAWANDBG("IPACM passed %u rules to Q6\n",
+		req->filter_spec_ex_list_len);
+	}
+
+ /* cache the qmi_filter_request */
+ memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+ req, sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
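+	/*
+	 * Example: the cache index wraps modulo 10, so after the 11th
+	 * request slot 0 is overwritten again; only the 10 most recent
+	 * filter requests are retained (oldest entries are reused first).
+	 */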
+
+ req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
+ req_desc.ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei;
+
+ memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+ resp_desc.max_msg_len =
+ QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
+ resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+ req,
+ sizeof(struct ipa_install_fltr_rule_req_msg_v01),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_REQ_TIMEOUT_MS);
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_install_filter");
+}
+
+
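+/*
+ * Ask Q6 to force-clear its data path for the pipes set in
+ * req->source_pipe_bitmask, typically so that in-flight traffic is
+ * flushed before those pipes are suspended or reset.
+ */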
+int ipa3_qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+ struct ipa_enable_force_clear_datapath_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc = 0;
+
+ if (!req || !req->source_pipe_bitmask) {
+ IPAWANERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ req_desc.max_msg_len =
+ QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+ req_desc.ei_array =
+ ipa3_enable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(resp));
+ resp_desc.max_msg_len =
+ QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+ resp_desc.ei_array =
+ ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt,
+ &req_desc,
+ req,
+ sizeof(*req),
+ &resp_desc, &resp, sizeof(resp), 0);
+ if (rc < 0) {
+ IPAWANERR("send req failed %d\n", rc);
+ return rc;
+ }
+ if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("enable_force_clear_datapath failed %d\n",
+			resp.resp.result);
+ return resp.resp.result;
+ }
+ IPAWANDBG("SUCCESS\n");
+ return rc;
+}
+
+int ipa3_qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+ struct ipa_disable_force_clear_datapath_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc = 0;
+
+ if (!req) {
+ IPAWANERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ req_desc.max_msg_len =
+ QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+ req_desc.ei_array =
+ ipa3_disable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(resp));
+ resp_desc.max_msg_len =
+ QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+ resp_desc.ei_array =
+ ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt,
+ &req_desc,
+ req,
+ sizeof(*req),
+ &resp_desc, &resp, sizeof(resp), 0);
+ if (rc < 0) {
+ IPAWANERR("send req failed %d\n", rc);
+ return rc;
+ }
+ if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("disable_force_clear_datapath failed %d\n",
+			resp.resp.result);
+ return resp.resp.result;
+ }
+ IPAWANDBG("SUCCESS\n");
+ return rc;
+}
+
+/* sending filter-installed-notify-request to modem */
+int ipa3_qmi_filter_notify_send(
+ struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+ struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc = 0;
+
+	/* check if the filter rules from IPACM are valid */
+	if (req->rule_id_len == 0) {
+		IPAWANERR("delete UL filter rule for pipe %d\n",
+			req->source_pipe_index);
+		return -EINVAL;
+	} else if (req->rule_id_len > QMI_IPA_MAX_FILTERS_V01) {
+		IPAWANERR("UL filter rules for pipe %d exceed max (%u)\n",
+			req->source_pipe_index,
+			req->rule_id_len);
+		return -EINVAL;
+	}
+
+ /* cache the qmi_filter_request */
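+	/*
+	 * As with the install requests above, the last
+	 * MAX_NUM_QMI_RULE_CACHE notifications are kept in a ring buffer
+	 * so they can be replayed to Q6 later (e.g. after a modem SSR).
+	 */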
+ memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+ ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+ req, sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %=
+		MAX_NUM_QMI_RULE_CACHE;
+
+ req_desc.max_msg_len =
+ QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01;
+ req_desc.ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei;
+
+ memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+ resp_desc.max_msg_len =
+ QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
+ resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt,
+ &req_desc,
+ req,
+ sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_REQ_TIMEOUT_MS);
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_fltr_installed_notif_resp");
+}
+
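+/*
+ * Work function that drains all pending QMI messages on the client
+ * handle; qmi_recv_msg() returns -ENOMSG once the queue is empty, and
+ * any other non-zero value is a real receive error.
+ */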
+static void ipa3_q6_clnt_recv_msg(struct work_struct *work)
+{
+ int rc;
+
+ do {
+		IPAWANDBG_LOW("Notified about a Receive Event\n");
+ rc = qmi_recv_msg(ipa_q6_clnt);
+ } while (rc == 0);
+ if (rc != -ENOMSG)
+ IPAWANERR("Error receiving message\n");
+}
+
+static void ipa3_q6_clnt_notify(struct qmi_handle *handle,
+ enum qmi_event_type event, void *notify_priv)
+{
+ switch (event) {
+ case QMI_RECV_MSG:
+		IPAWANDBG_LOW("client qmi recv message called\n");
+ if (!workqueues_stopped)
+ queue_delayed_work(ipa_clnt_resp_workqueue,
+ &ipa3_work_recv_msg_client, 0);
+ break;
+ default:
+ break;
+ }
+}
+
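+/*
+ * Indication callback for the Q6 client handle: decodes
+ * QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01 and broadcasts the
+ * quota-reached event for the reported mux_id.
+ */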
+static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
+ void *msg, unsigned int msg_len,
+ void *ind_cb_priv)
+{
+ struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+ struct msg_desc qmi_ind_desc;
+ int rc = 0;
+
+ if (handle != ipa_q6_clnt) {
+ IPAWANERR("Wrong client\n");
+ return;
+ }
+
+ if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) {
+ memset(&qmi_ind, 0, sizeof(
+ struct ipa_data_usage_quota_reached_ind_msg_v01));
+ qmi_ind_desc.max_msg_len =
+ QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01;
+ qmi_ind_desc.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01;
+ qmi_ind_desc.ei_array =
+ ipa3_data_usage_quota_reached_ind_msg_data_v01_ei;
+
+ rc = qmi_kernel_decode(&qmi_ind_desc, &qmi_ind, msg, msg_len);
+ if (rc < 0) {
+ IPAWANERR("Error decoding msg_id %d\n", msg_id);
+ return;
+ }
+ IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
+ qmi_ind.apn.mux_id,
+ (unsigned long int) qmi_ind.apn.num_Mbytes);
+ ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+ }
+}
+
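+/*
+ * Runs once the Q6 QMI service is up: creates the client handle,
+ * connects to the service, registers the indication callback, sends
+ * the synchronous modem-driver init message, and finally indicates
+ * master-driver-init-complete back to the modem if the local service
+ * has already finished its own init.
+ */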
+static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
+{
+ int rc;
+ struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+
+ /* Create a Local client port for QMI communication */
+ ipa_q6_clnt = qmi_handle_create(ipa3_q6_clnt_notify, NULL);
+ if (!ipa_q6_clnt) {
+ IPAWANERR("QMI client handle alloc failed\n");
+ return;
+ }
+
+ IPAWANDBG("Lookup server name, get client-hdl(%p)\n",
+ ipa_q6_clnt);
+ rc = qmi_connect_to_service(ipa_q6_clnt,
+ IPA_Q6_SERVICE_SVC_ID,
+ IPA_Q6_SVC_VERS,
+ IPA_Q6_SERVICE_INS_ID);
+ if (rc < 0) {
+ IPAWANERR("Server not found\n");
+ qmi_handle_destroy(ipa_q6_clnt);
+ ipa_q6_clnt = NULL;
+ return;
+ }
+
+ rc = qmi_register_ind_cb(ipa_q6_clnt, ipa3_q6_clnt_ind_cb, NULL);
+ if (rc < 0)
+ IPAWANERR("Unable to register for indications\n");
+
+ ipa_q6_clnt_reset = 0;
+ IPAWANDBG("Q6 QMI service available now\n");
+ /* Initialize modem IPA-driver */
+ IPAWANDBG("send ipa3_qmi_init_modem_send_sync_msg to modem\n");
+ rc = ipa3_qmi_init_modem_send_sync_msg();
+ if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+ IPAWANERR(
+ "ipa3_qmi_init_modem_send_sync_msg failed due to SSR!\n");
+ /* Cleanup will take place when ipa3_wwan_remove is called */
+ return;
+ }
+ if (rc != 0) {
+ IPAWANERR("ipa3_qmi_init_modem_send_sync_msg failed\n");
+ /*
+ * This is a very unexpected scenario, which requires a kernel
+ * panic in order to force dumps for QMI/Q6 side analysis.
+ */
+ BUG();
+ return;
+ }
+ ipa3_qmi_modem_init_fin = true;
+
+ /* got modem_init_cmplt_req already, load uc-related register */
+	if (ipa3_modem_init_cmplt) {
+ IPAWANDBG("load uc related registers (%d)\n",
+ ipa3_modem_init_cmplt);
+ ipa3_uc_load_notify();
+ }
+
+ /* In cold-bootup, first_time_handshake = false */
+ ipa3_q6_handshake_complete(first_time_handshake);
+ first_time_handshake = true;
+ IPAWANDBG("complete, ipa3_qmi_modem_init_fin : %d\n",
+ ipa3_qmi_modem_init_fin);
+
+ if (ipa3_qmi_indication_fin) {
+ IPAWANDBG("send indication to modem (%d)\n",
+ ipa3_qmi_indication_fin);
+ memset(&ind, 0, sizeof(struct
+ ipa_master_driver_init_complt_ind_msg_v01));
+ ind.master_driver_init_status.result =
+ IPA_QMI_RESULT_SUCCESS_V01;
+		rc = qmi_send_ind(ipa3_svc_handle, curr_conn,
+			&ipa3_master_driver_complete_indication_desc,
+			&ind,
+			sizeof(ind));
+		if (rc < 0)
+			IPAWANERR("send master driver init ind failed %d\n",
+				rc);
+		else
+			IPAWANDBG("ipa_qmi_service_client good\n");
+	} else {
+		IPAWANERR("not sending indication (%d)\n",
+			ipa3_qmi_indication_fin);
+	}
+}
+
+
+static void ipa3_q6_clnt_svc_exit(struct work_struct *work)
+{
+ qmi_handle_destroy(ipa_q6_clnt);
+ ipa_q6_clnt_reset = 1;
+ ipa_q6_clnt = NULL;
+}
+
+
+static int ipa3_q6_clnt_svc_event_notify(struct notifier_block *this,
+ unsigned long code,
+ void *_cmd)
+{
+ IPAWANDBG("event %ld\n", code);
+ switch (code) {
+ case QMI_SERVER_ARRIVE:
+ if (!workqueues_stopped)
+ queue_delayed_work(ipa_clnt_req_workqueue,
+ &ipa3_work_svc_arrive, 0);
+ break;
+ case QMI_SERVER_EXIT:
+ if (!workqueues_stopped)
+ queue_delayed_work(ipa_clnt_req_workqueue,
+ &ipa3_work_svc_exit, 0);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+
+static struct notifier_block ipa3_q6_clnt_nb = {
+ .notifier_call = ipa3_q6_clnt_svc_event_notify,
+};
+
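+/*
+ * One-shot worker that brings up both QMI ends: the local A7 service
+ * (so the modem can reach the apps processor) and the Q6 client
+ * machinery (workqueues plus the service-event notifier). Failures
+ * unwind in reverse order through the goto labels below.
+ */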
+static void ipa3_qmi_service_init_worker(struct work_struct *work)
+{
+ int rc;
+
+	/* Initialize QMI-service */
+	IPAWANDBG("Initializing IPA A7 QMI service\n");
+
+ /* start the QMI msg cache */
+ ipa3_qmi_ctx = vzalloc(sizeof(*ipa3_qmi_ctx));
+ if (!ipa3_qmi_ctx) {
+		IPAWANERR("vzalloc of ipa3_qmi_ctx failed\n");
+ return;
+ }
+ ipa3_qmi_ctx->modem_cfg_emb_pipe_flt =
+ ipa3_get_modem_cfg_emb_pipe_flt();
+
+ ipa_svc_workqueue = create_singlethread_workqueue("ipa_A7_svc");
+ if (!ipa_svc_workqueue) {
+ IPAWANERR("Creating ipa_A7_svc workqueue failed\n");
+ vfree(ipa3_qmi_ctx);
+ ipa3_qmi_ctx = NULL;
+ return;
+ }
+
+ ipa3_svc_handle = qmi_handle_create(qmi_ipa_a5_svc_ntfy, NULL);
+ if (!ipa3_svc_handle) {
+ IPAWANERR("Creating ipa_A7_svc qmi handle failed\n");
+ goto destroy_ipa_A7_svc_wq;
+ }
+
+ /*
+ * Setting the current connection to NULL, as due to a race between
+ * server and client clean-up in SSR, the disconnect_cb might not
+ * have necessarily been called
+ */
+ curr_conn = NULL;
+
+ rc = qmi_svc_register(ipa3_svc_handle, &ipa3_a5_svc_ops_options);
+ if (rc < 0) {
+ IPAWANERR("Registering ipa_a5 svc failed %d\n",
+ rc);
+ goto destroy_qmi_handle;
+ }
+
+ /* Initialize QMI-client */
+
+ ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req");
+ if (!ipa_clnt_req_workqueue) {
+ IPAWANERR("Creating clnt_req workqueue failed\n");
+ goto deregister_qmi_srv;
+ }
+
+ ipa_clnt_resp_workqueue = create_singlethread_workqueue("clnt_resp");
+ if (!ipa_clnt_resp_workqueue) {
+ IPAWANERR("Creating clnt_resp workqueue failed\n");
+ goto destroy_clnt_req_wq;
+ }
+
+ rc = qmi_svc_event_notifier_register(IPA_Q6_SERVICE_SVC_ID,
+ IPA_Q6_SVC_VERS,
+ IPA_Q6_SERVICE_INS_ID, &ipa3_q6_clnt_nb);
+ if (rc < 0) {
+ IPAWANERR("notifier register failed\n");
+ goto destroy_clnt_resp_wq;
+ }
+
+ /* get Q6 service and start send modem-initial to Q6 */
+ IPAWANDBG("wait service available\n");
+ return;
+
+destroy_clnt_resp_wq:
+ destroy_workqueue(ipa_clnt_resp_workqueue);
+ ipa_clnt_resp_workqueue = NULL;
+destroy_clnt_req_wq:
+ destroy_workqueue(ipa_clnt_req_workqueue);
+ ipa_clnt_req_workqueue = NULL;
+deregister_qmi_srv:
+ qmi_svc_unregister(ipa3_svc_handle);
+destroy_qmi_handle:
+ qmi_handle_destroy(ipa3_svc_handle);
+	ipa3_svc_handle = NULL;
+destroy_ipa_A7_svc_wq:
+ destroy_workqueue(ipa_svc_workqueue);
+ ipa_svc_workqueue = NULL;
+ vfree(ipa3_qmi_ctx);
+ ipa3_qmi_ctx = NULL;
+}
+
+int ipa3_qmi_service_init(uint32_t wan_platform_type)
+{
+ ipa_wan_platform = wan_platform_type;
+ ipa3_qmi_modem_init_fin = false;
+ ipa3_qmi_indication_fin = false;
+ ipa3_modem_init_cmplt = false;
+ workqueues_stopped = false;
+
+ if (!ipa3_svc_handle) {
+ INIT_WORK(&ipa3_qmi_service_init_work,
+ ipa3_qmi_service_init_worker);
+ schedule_work(&ipa3_qmi_service_init_work);
+ }
+ return 0;
+}
+
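+/*
+ * Tears down everything ipa3_qmi_service_init() set up, in roughly
+ * reverse order: the local service first, then the Q6 client,
+ * workqueues and the cached-request context.
+ */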
+void ipa3_qmi_service_exit(void)
+{
+ int ret = 0;
+
+ workqueues_stopped = true;
+
+ /* qmi-service */
+ if (ipa3_svc_handle) {
+ ret = qmi_svc_unregister(ipa3_svc_handle);
+ if (ret < 0)
+ IPAWANERR("unregister qmi handle %p failed, ret=%d\n",
+ ipa3_svc_handle, ret);
+ }
+ if (ipa_svc_workqueue) {
+ flush_workqueue(ipa_svc_workqueue);
+ destroy_workqueue(ipa_svc_workqueue);
+ ipa_svc_workqueue = NULL;
+ }
+
+ if (ipa3_svc_handle) {
+ ret = qmi_handle_destroy(ipa3_svc_handle);
+ if (ret < 0)
+ IPAWANERR("Error destroying qmi handle %p, ret=%d\n",
+ ipa3_svc_handle, ret);
+ }
+
+ /* qmi-client */
+
+ /* Unregister from events */
+ ret = qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID,
+ IPA_Q6_SVC_VERS,
+ IPA_Q6_SERVICE_INS_ID, &ipa3_q6_clnt_nb);
+ if (ret < 0)
+ IPAWANERR(
+ "Error qmi_svc_event_notifier_unregister service %d, ret=%d\n",
+ IPA_Q6_SERVICE_SVC_ID, ret);
+
+ /* Release client handle */
+	ipa3_q6_clnt_svc_exit(NULL);
+
+ if (ipa_clnt_req_workqueue) {
+ destroy_workqueue(ipa_clnt_req_workqueue);
+ ipa_clnt_req_workqueue = NULL;
+ }
+ if (ipa_clnt_resp_workqueue) {
+ destroy_workqueue(ipa_clnt_resp_workqueue);
+ ipa_clnt_resp_workqueue = NULL;
+ }
+
+ /* clean the QMI msg cache */
+ if (ipa3_qmi_ctx != NULL) {
+ vfree(ipa3_qmi_ctx);
+ ipa3_qmi_ctx = NULL;
+ }
+	ipa3_svc_handle = NULL;
+ ipa3_qmi_modem_init_fin = false;
+ ipa3_qmi_indication_fin = false;
+ ipa3_modem_init_cmplt = false;
+}
+
+void ipa3_qmi_stop_workqueues(void)
+{
+ IPAWANDBG("Stopping all QMI workqueues\n");
+
+ /* Stopping all workqueues so new work won't be scheduled */
+ workqueues_stopped = true;
+
+ /* Making sure that the current scheduled work won't be executed */
+ cancel_delayed_work(&work_recv_msg);
+ cancel_delayed_work(&ipa3_work_recv_msg_client);
+ cancel_delayed_work(&ipa3_work_svc_arrive);
+ cancel_delayed_work(&ipa3_work_svc_exit);
+}
+
+
+/* voting for bus BW to ipa_rm */
+int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
+{
+ struct ipa_rm_perf_profile profile;
+ int ret;
+
+ if (bw_mbps == NULL) {
+ IPAWANERR("Bus BW is invalid\n");
+ return -EINVAL;
+ }
+
+ memset(&profile, 0, sizeof(profile));
+ profile.max_supported_bandwidth_mbps = *bw_mbps;
+ ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+ &profile);
+ if (ret)
+ IPAWANERR("Failed to set perf profile to BW %u\n",
+ profile.max_supported_bandwidth_mbps);
+ else
+		IPAWANDBG("Successfully set perf profile to BW %u\n",
+ profile.max_supported_bandwidth_mbps);
+
+ return ret;
+}
+
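+/*
+ * The tethering statistics calls below share one pattern: encode the
+ * request with its *_ei table, send it synchronously with the longer
+ * QMI_SEND_STATS_REQ_TIMEOUT_MS timeout, and let
+ * ipa3_check_qmi_response() translate the QMI result into an errno.
+ */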
+int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+ struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01;
+ req_desc.ei_array = ipa3_get_data_stats_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_GET_DATA_STATS_RESP_V01;
+ resp_desc.ei_array = ipa3_get_data_stats_resp_msg_data_v01_ei;
+
+ IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_get_data_stats_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_get_data_stats_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG_LOW("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result,
+ resp->resp.error, "ipa_get_data_stats_resp_msg_v01");
+}
+
+int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+ struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01;
+ req_desc.ei_array = ipa3_get_apn_data_stats_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01;
+ resp_desc.ei_array = ipa3_get_apn_data_stats_resp_msg_data_v01_ei;
+
+ IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG_LOW("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result,
+ resp->resp.error, "ipa_get_apn_data_stats_req_msg_v01");
+}
+
+int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+ struct ipa_set_data_usage_quota_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01));
+
+ req_desc.max_msg_len = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01;
+ req_desc.ei_array = ipa3_set_data_usage_quota_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01;
+ resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei;
+
+ IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG_LOW("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_set_data_usage_quota_req_msg_v01");
+}
+
+int ipa3_qmi_stop_data_qouta(void)
+{
+ struct ipa_stop_data_usage_quota_req_msg_v01 req;
+ struct ipa_stop_data_usage_quota_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01));
+ memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01));
+
+ req_desc.max_msg_len =
+ QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01;
+ req_desc.ei_array = ipa3_stop_data_usage_quota_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01;
+ resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei;
+
+ IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG_LOW("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
new file mode 100644
index 0000000..0f64120
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -0,0 +1,303 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IPA_QMI_SERVICE_H
+#define IPA_QMI_SERVICE_H
+
+#include <linux/ipa.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "ipa_i.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+
+/**
+ * Names of the DL WWAN default routing tables for v4 and v6
+ */
+#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr"
+#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt"
+#define MAX_NUM_Q6_RULE 35
+#define MAX_NUM_QMI_RULE_CACHE 10
+#define DEV_NAME "ipa-wan"
+#define SUBSYS_MODEM "modem"
+
+#define IPAWANDBG(fmt, args...) \
+ do { \
+ pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ DEV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ DEV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+
+#define IPAWANDBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ DEV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPAWANERR(fmt, args...) \
+ do { \
+ pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ DEV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ DEV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPAWANINFO(fmt, args...) \
+ do { \
+ pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ DEV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ DEV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+extern struct ipa3_qmi_context *ipa3_qmi_ctx;
+
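+/**
+ * struct ipa3_qmi_context - QMI state cached by the IPA WAN driver
+ * @q6_ul_filter_rule: UL filter rules received from the modem (Q6)
+ * @q6_ul_filter_rule_hdl: handles of the committed UL filter rules
+ * @num_ipa_install_fltr_rule_req_msg: ring index into the install cache
+ * @ipa_install_fltr_rule_req_msg_cache: last filter-install requests
+ * @num_ipa_fltr_installed_notif_req_msg: ring index into the notif cache
+ * @ipa_fltr_installed_notif_req_msg_cache: last filter-installed notifs
+ * @modem_cfg_emb_pipe_flt: true if the modem configures embedded pipe
+ *	filtering rules
+ */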
+struct ipa3_qmi_context {
+	struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+	u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+	int num_ipa_install_fltr_rule_req_msg;
+	struct ipa_install_fltr_rule_req_msg_v01
+		ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	int num_ipa_fltr_installed_notif_req_msg;
+	struct ipa_fltr_installed_notif_req_msg_v01
+		ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	bool modem_cfg_emb_pipe_flt;
+};
+
+struct ipa3_rmnet_mux_val {
+ uint32_t mux_id;
+ int8_t vchannel_name[IFNAMSIZ];
+ bool mux_channel_set;
+ bool ul_flt_reg;
+ bool mux_hdr_set;
+ uint32_t hdr_hdl;
+};
+
+extern struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
+extern struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_config_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_config_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
+
+/**
+ * struct ipa3_rmnet_context - IPA rmnet context
+ * @ipa_rmnet_ssr: support modem SSR
+ * @polling_interval: Requested interval for polling tethered statistics
+ * @metered_mux_id: The mux ID on which quota has been set
+ */
+struct ipa3_rmnet_context {
+ bool ipa_rmnet_ssr;
+ u64 polling_interval;
+ u32 metered_mux_id;
+};
+
+extern struct ipa3_rmnet_context ipa3_rmnet_ctx;
+
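+/*
+ * When CONFIG_RMNET_IPA3 is not set, every API below collapses into an
+ * inline stub that returns -EPERM (or does nothing for void functions),
+ * so callers do not need #ifdefs of their own.
+ */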
+#ifdef CONFIG_RMNET_IPA3
+
+int ipa3_qmi_service_init(uint32_t wan_platform_type);
+
+void ipa3_qmi_service_exit(void);
+
+/* sending filter-install-request to modem */
+int ipa3_qmi_filter_request_send(
+ struct ipa_install_fltr_rule_req_msg_v01 *req);
+
+/* sending filter-installed-notify-request to modem */
+int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
+ *req);
+
+/* voting for bus BW to ipa_rm */
+int ipa3_vote_for_bus_bw(uint32_t *bw_mbps);
+
+int ipa3_qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+int ipa3_qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+ *rule_req);
+
+int ipa3_wwan_update_mux_channel_prop(void);
+
+int ipa3_wan_ioctl_init(void);
+
+void ipa3_wan_ioctl_stop_qmi_messages(void);
+
+void ipa3_wan_ioctl_enable_qmi_messages(void);
+
+void ipa3_wan_ioctl_deinit(void);
+
+void ipa3_qmi_stop_workqueues(void);
+
+int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats
+ *data);
+
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data);
+
+void ipa3_broadcast_quota_reach_ind(uint32_t mux_id);
+
+int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
+ *data);
+
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+ bool reset);
+
+int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+ struct ipa_get_data_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+ struct ipa_get_apn_data_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req);
+
+int ipa3_qmi_stop_data_qouta(void);
+
+void ipa3_q6_handshake_complete(bool ssr_bootup);
+
+#else /* CONFIG_RMNET_IPA3 */
+
+static inline int ipa3_qmi_service_init(uint32_t wan_platform_type)
+{
+ return -EPERM;
+}
+
+static inline void ipa3_qmi_service_exit(void) { }
+
+/* sending filter-install-request to modem */
+static inline int ipa3_qmi_filter_request_send(
+ struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+/* sending filter-installed-notify-request to modem */
+static inline int ipa3_qmi_filter_notify_send(
+ struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_qmi_enable_force_clear_datapath_send(
+ struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_qmi_disable_force_clear_datapath_send(
+ struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_copy_ul_filter_rule_to_ipa(
+ struct ipa_install_fltr_rule_req_msg_v01 *rule_req)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_wwan_update_mux_channel_prop(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_wan_ioctl_init(void)
+{
+ return -EPERM;
+}
+
+static inline void ipa3_wan_ioctl_stop_qmi_messages(void) { }
+
+static inline void ipa3_wan_ioctl_enable_qmi_messages(void) { }
+
+static inline void ipa3_wan_ioctl_deinit(void) { }
+
+static inline void ipa3_qmi_stop_workqueues(void) { }
+
+static inline int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
+{
+ return -EPERM;
+}
+
+static inline int rmnet_ipa3_poll_tethering_stats(
+ struct wan_ioctl_poll_tethering_stats *data)
+{
+ return -EPERM;
+}
+
+static inline int rmnet_ipa3_set_data_quota(
+ struct wan_ioctl_set_data_quota *data)
+{
+ return -EPERM;
+}
+
+static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id) { }
+
+static inline int ipa3_qmi_get_data_stats(
+ struct ipa_get_data_stats_req_msg_v01 *req,
+ struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_qmi_get_network_stats(
+ struct ipa_get_apn_data_stats_req_msg_v01 *req,
+ struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_qmi_set_data_quota(
+ struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_qmi_stop_data_qouta(void)
+{
+ return -EPERM;
+}
+
+static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
+
+#endif /* CONFIG_RMNET_IPA3 */
+
+#endif /* IPA_QMI_SERVICE_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
new file mode 100644
index 0000000..6907811
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -0,0 +1,2746 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+/* Type Definitions */
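+/*
+ * The elem_info arrays in this file are the QMI encode/decode tables
+ * for the IPA service: each entry maps one field of a message struct
+ * to its wire TLV (data type, element count/size, tlv_type), nested
+ * structs are chained through ei_array, and QMI_EOTI terminates every
+ * table.
+ */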
+static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_hdr_tbl_info_type_v01,
+ modem_offset_start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_hdr_tbl_info_type_v01,
+ modem_offset_end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_route_tbl_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_route_tbl_info_type_v01,
+ route_tbl_start_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_route_tbl_info_type_v01,
+ num_indices),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_modem_mem_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_modem_mem_info_type_v01,
+ block_start_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_modem_mem_info_type_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+ modem_offset_start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+ modem_offset_end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_zip_tbl_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_zip_tbl_info_type_v01,
+ modem_offset_start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_zip_tbl_info_type_v01,
+ modem_offset_end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_ipfltr_range_eq_16_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_range_eq_16_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_range_eq_16_type_v01,
+ range_low),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_range_eq_16_type_v01,
+ range_high),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_ipfltr_mask_eq_32_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_32_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_32_type_v01,
+ mask),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_32_type_v01,
+ value),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_ipfltr_eq_16_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_eq_16_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ipfltr_eq_16_type_v01,
+ value),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_ipfltr_eq_32_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01,
+ value),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_ipfltr_mask_eq_128_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_128_type_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 16,
+ .elem_size = sizeof(uint8_t),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_128_type_v01,
+ mask),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 16,
+ .elem_size = sizeof(uint8_t),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ipfltr_mask_eq_128_type_v01,
+ value),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_filter_rule_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_type_v01,
+ rule_eq_bitmap),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_type_v01,
+ tos_eq_present),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ tos_eq),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ protocol_eq_present),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ protocol_eq),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ num_ihl_offset_range_16),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01,
+ .elem_size = sizeof(
+ struct ipa_ipfltr_range_eq_16_type_v01),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_range_16),
+ .ei_array = ipa3_ipfltr_range_eq_16_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ num_offset_meq_32),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01,
+ .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ offset_meq_32),
+ .ei_array = ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ tc_eq_present),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ tc_eq),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ flow_eq_present),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ flow_eq),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_eq_16_present),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_ipfltr_eq_16_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_eq_16),
+ .ei_array = ipa3_ipfltr_eq_16_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_eq_32_present),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_ipfltr_eq_32_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_eq_32),
+ .ei_array = ipa3_ipfltr_eq_32_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ num_ihl_offset_meq_32),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01,
+ .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ihl_offset_meq_32),
+ .ei_array = ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ num_offset_meq_128),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len =
+ QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01,
+ .elem_size = sizeof(
+ struct ipa_ipfltr_mask_eq_128_type_v01),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_type_v01,
+ offset_meq_128),
+ .ei_array = ipa3_ipfltr_mask_eq_128_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ metadata_meq32_present),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ metadata_meq32),
+ .ei_array = ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_rule_type_v01,
+ ipv4_frag_eq_present),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_filter_spec_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ filter_spec_identifier),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ ip_type),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_filter_rule_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ filter_rule),
+ .ei_array = ipa3_filter_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ filter_action),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ is_routing_table_index_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ route_table_index),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ is_mux_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa_filter_spec_ex_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_ex_type_v01,
+ ip_type),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_filter_rule_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_ex_type_v01,
+ filter_rule),
+ .ei_array = ipa3_filter_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_ex_type_v01,
+ filter_action),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_ex_type_v01,
+ is_routing_table_index_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_ex_type_v01,
+ route_table_index),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_ex_type_v01,
+ is_mux_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_ex_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_ex_type_v01,
+ rule_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_filter_spec_ex_type_v01,
+ is_rule_hashable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct
+elem_info ipa3_filter_rule_identifier_to_handle_map_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_identifier_to_handle_map_v01,
+ filter_spec_identifier),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_rule_identifier_to_handle_map_v01,
+ filter_handle),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_filter_handle_to_index_map_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_handle_to_index_map_v01,
+ filter_handle),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_filter_handle_to_index_map_v01,
+ filter_index),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ platform_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ platform_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ hdr_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_hdr_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ hdr_tbl_info),
+ .ei_array = ipa3_hdr_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_route_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_route_tbl_info),
+ .ei_array = ipa3_route_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_route_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_route_tbl_info),
+ .ei_array = ipa3_route_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_filter_tbl_start_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_filter_tbl_start_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_filter_tbl_start_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_filter_tbl_start_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ modem_mem_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_modem_mem_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ modem_mem_info),
+ .ei_array = ipa3_modem_mem_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ is_ssr_bootup_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ is_ssr_bootup),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ hdr_proc_ctx_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(
+ struct ipa_hdr_proc_ctx_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ hdr_proc_ctx_tbl_info),
+ .ei_array = ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ zip_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_zip_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ zip_tbl_info),
+ .ei_array = ipa3_zip_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_route_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_hash_route_tbl_info),
+ .ei_array = ipa3_route_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_route_tbl_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_hash_route_tbl_info),
+ .ei_array = ipa3_route_tbl_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_hash_filter_tbl_start_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v4_hash_filter_tbl_start_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_hash_filter_tbl_start_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_req_msg_v01,
+ v6_hash_filter_tbl_start_addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ default_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ default_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ modem_driver_init_pending_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_resp_msg_v01,
+ modem_driver_init_pending),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_cmplt_req_msg_v01,
+ status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_init_modem_driver_cmplt_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_indication_reg_req_msg_v01,
+ master_driver_init_complete_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_indication_reg_req_msg_v01,
+ master_driver_init_complete),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_indication_reg_req_msg_v01,
+ data_usage_quota_reached_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_indication_reg_req_msg_v01,
+ data_usage_quota_reached),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_indication_reg_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ ipa_master_driver_init_complt_ind_msg_v01,
+ master_driver_init_status),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ filter_spec_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ filter_spec_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(struct ipa_filter_spec_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ filter_spec_list),
+ .ei_array = ipa_filter_spec_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ source_pipe_index_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ source_pipe_index),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ num_ipv4_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ num_ipv4_filters),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ num_ipv6_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ num_ipv6_filters),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ xlat_filter_indices_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ xlat_filter_indices_list_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ xlat_filter_indices_list),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ filter_spec_ex_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ filter_spec_ex_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(struct ipa_filter_spec_ex_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_req_msg_v01,
+ filter_spec_ex_list),
+ .ei_array = ipa_filter_spec_ex_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ filter_handle_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ filter_handle_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(
+ struct ipa_filter_rule_identifier_to_handle_map_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ filter_handle_list),
+ .ei_array =
+ ipa3_filter_rule_identifier_to_handle_map_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ rule_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ rule_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_install_fltr_rule_resp_msg_v01,
+ rule_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ source_pipe_index),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ install_status),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ filter_index_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(
+ struct ipa_filter_handle_to_index_map_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ filter_index_list),
+ .ei_array = ipa3_filter_handle_to_index_map_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ embedded_pipe_index_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ embedded_pipe_index),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ retain_header_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ retain_header),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ embedded_call_mux_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ embedded_call_mux_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ num_ipv4_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ num_ipv4_filters),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ num_ipv6_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ num_ipv6_filters),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ start_ipv4_filter_idx_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ start_ipv4_filter_idx),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ start_ipv6_filter_idx_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ start_ipv6_filter_idx),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_req_msg_v01,
+ source_pipe_bitmask),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_req_msg_v01,
+ request_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_req_msg_v01,
+ throttle_source_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_req_msg_v01,
+ throttle_source),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_enable_force_clear_datapath_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_disable_force_clear_datapath_req_msg_v01,
+ request_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_disable_force_clear_datapath_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_config_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ peripheral_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ peripheral_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ hw_deaggr_supported_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ hw_deaggr_supported),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ max_aggr_frame_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ max_aggr_frame_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ipa_ingress_pipe_mode_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ipa_ingress_pipe_mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ peripheral_speed_info_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ peripheral_speed_info),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_time_limit_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_time_limit),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_pkt_limit_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_pkt_limit),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_byte_limit_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_accumulation_byte_limit),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_accumulation_time_limit_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_accumulation_time_limit),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ hw_control_flags_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ hw_control_flags),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_msi_event_threshold_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_msi_event_threshold),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_msi_event_threshold_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_msi_event_threshold),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_fifo_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ ul_fifo_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_fifo_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_fifo_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_buf_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(
+ struct ipa_config_req_msg_v01,
+ dl_buf_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_config_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_config_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_get_data_stats_req_msg_v01,
+ ipa_stats_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_data_stats_req_msg_v01,
+ reset_stats_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_data_stats_req_msg_v01,
+ reset_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_pipe_stats_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ pipe_index),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ num_ipv4_packets),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ num_ipv4_bytes),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ num_ipv6_packets),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_pipe_stats_info_type_v01,
+ num_ipv6_bytes),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_stats_type_filter_rule_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_stats_type_filter_rule_v01,
+ filter_rule_index),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_stats_type_filter_rule_v01,
+ num_packets),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ipa_stats_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ipa_stats_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ul_src_pipe_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ul_src_pipe_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_PIPES_V01,
+ .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ ul_src_pipe_stats_list),
+ .ei_array = ipa3_pipe_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_dst_pipe_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_dst_pipe_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_PIPES_V01,
+ .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_dst_pipe_stats_list),
+ .ei_array = ipa3_pipe_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_filter_rule_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_filter_rule_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+		/* element type must match ei_array below (filter-rule stats) */
+		.elem_size = sizeof(struct ipa_stats_type_filter_rule_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct ipa_get_data_stats_resp_msg_v01,
+ dl_filter_rule_stats_list),
+ .ei_array = ipa3_stats_type_filter_rule_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_apn_data_stats_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ num_ul_packets),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ num_ul_bytes),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ num_dl_packets),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_apn_data_stats_info_type_v01,
+ num_dl_bytes),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_req_msg_v01,
+ mux_id_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_req_msg_v01,
+ mux_id_list_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_APN_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_req_msg_v01,
+ mux_id_list),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_resp_msg_v01,
+ apn_data_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_resp_msg_v01,
+ apn_data_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_APN_V01,
+ .elem_size = sizeof(struct
+ ipa_apn_data_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_apn_data_stats_resp_msg_v01,
+ apn_data_stats_list),
+ .ei_array = ipa3_apn_data_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info ipa3_data_usage_quota_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_data_usage_quota_info_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ ipa_data_usage_quota_info_type_v01,
+ num_Mbytes),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_set_data_usage_quota_req_msg_v01,
+ apn_quota_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_set_data_usage_quota_req_msg_v01,
+ apn_quota_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_APN_V01,
+ .elem_size = sizeof(struct
+ ipa_data_usage_quota_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_set_data_usage_quota_req_msg_v01,
+ apn_quota_list),
+ .ei_array = ipa3_data_usage_quota_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_set_data_usage_quota_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct
+ ipa_data_usage_quota_info_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_data_usage_quota_reached_ind_msg_v01,
+ apn),
+ .ei_array = ipa3_data_usage_quota_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[] = {
+ /* ipa_stop_data_usage_quota_req_msg is empty */
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_stop_data_usage_quota_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
new file mode 100644
index 0000000..8930d92
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -0,0 +1,1792 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/idr.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_RT_INDEX_BITMAP_SIZE (32)
+#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED (-1)
+#define IPA_RT_STATUS_OF_MDFY_FAILED (-1)
+
+#define IPA_RT_GET_RULE_TYPE(__entry) \
+ ( \
+ ((__entry)->rule.hashable) ? \
+ (IPA_RULE_HASHABLE) : (IPA_RULE_NON_HASHABLE) \
+ )
+
+/**
+ * ipa_generate_rt_hw_rule() - generate a single RT H/W rule
+ * This func does the core driver preparation work and then calls
+ * the HAL layer for the actual rule generation.
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know
+ *	the size of the rule as seen by HW, so no valid buffer is passed
+ *	and a scratch buffer is used instead.
+ *	With this scheme the rule is generated twice: once into the
+ *	scratch buffer to learn its size, and a second time into the
+ *	caller-supplied buffer, which is of the required size.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * The caller needs to hold any needed locks to ensure integrity.
+ */
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+ struct ipa3_rt_entry *entry, u8 *buf)
+{
+ struct ipahal_rt_rule_gen_params gen_params;
+ int res = 0;
+
+ memset(&gen_params, 0, sizeof(gen_params));
+
+ gen_params.ipt = ip;
+ gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst);
+ if (gen_params.dst_pipe_idx == -1) {
+ IPAERR("Wrong destination pipe specified in RT rule\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+ if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
+ IPAERR("No RT rule on IPA_client_producer pipe.\n");
+ IPAERR("pipe_idx: %d dst_pipe: %d\n",
+ gen_params.dst_pipe_idx, entry->rule.dst);
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+
+ proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
+ gen_params.hdr_lcl = ipa3_ctx->hdr_proc_ctx_tbl_lcl;
+ gen_params.hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+ gen_params.hdr_ofst = proc_ctx->offset_entry->offset +
+ ipa3_ctx->hdr_proc_ctx_tbl.start_offset;
+ } else if (entry->hdr) {
+ gen_params.hdr_lcl = ipa3_ctx->hdr_tbl_lcl;
+ gen_params.hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+ gen_params.hdr_ofst = entry->hdr->offset_entry->offset;
+ } else {
+ gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE;
+ gen_params.hdr_ofst = 0;
+ }
+
+ gen_params.priority = entry->prio;
+ gen_params.id = entry->rule_id;
+ gen_params.rule = (const struct ipa_rt_rule *)&entry->rule;
+
+ res = ipahal_rt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+ if (res)
+ IPAERR("failed to generate rt h/w rule\n");
+
+ return res;
+}
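+
+/*
+ * Illustrative two-pass usage of ipa_generate_rt_hw_rule() (a sketch of
+ * what ipa_prep_rt_tbl_for_cmt() and ipa_translate_rt_tbl_to_hw_fmt()
+ * below do; error handling elided):
+ *
+ *	// pass 1: buf == NULL, only entry->hw_len is computed
+ *	if (ipa_generate_rt_hw_rule(ip, entry, NULL))
+ *		return -EPERM;
+ *	// ...reserve at least entry->hw_len bytes at buf...
+ *	// pass 2: emit the rule into the caller-supplied buffer
+ *	if (ipa_generate_rt_hw_rule(ip, entry, buf))
+ *		return -EPERM;
+ *	buf += entry->hw_len;
+ */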
+
+/**
+ * ipa_translate_rt_tbl_to_hw_fmt() - translate the routing driver structures
+ * (rules and tables) to HW format and fill them into the given buffers
+ * @ip: the ip address family type
+ * @rlt: the type of the rules to translate (hashable or non-hashable)
+ * @base: the rules body buffer to be filled
+ * @hdr: the rules header (addresses/offsets) buffer to be filled
+ * @body_ofst: the offset of the rules body from the rules header in
+ * ipa sram (for local body usage)
+ * @apps_start_idx: the first rt table index of apps tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * The caller needs to hold any needed locks to ensure integrity.
+ */
+static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
+ enum ipa_rule_type rlt, u8 *base, u8 *hdr,
+ u32 body_ofst, u32 apps_start_idx)
+{
+ struct ipa3_rt_tbl_set *set;
+ struct ipa3_rt_tbl *tbl;
+ struct ipa_mem_buffer tbl_mem;
+ u8 *tbl_mem_buf;
+ struct ipa3_rt_entry *entry;
+ int res;
+ u64 offset;
+ u8 *body_i;
+
+ set = &ipa3_ctx->rt_tbl_set[ip];
+ body_i = base;
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ if (tbl->sz[rlt] == 0)
+ continue;
+ if (tbl->in_sys[rlt]) {
+ /* only body (no header) */
+ tbl_mem.size = tbl->sz[rlt] -
+ ipahal_get_hw_tbl_hdr_width();
+ if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+ IPAERR("fail to alloc sys tbl of size %d\n",
+ tbl_mem.size);
+ goto err;
+ }
+
+ if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+ hdr, tbl->idx - apps_start_idx, true)) {
+ IPAERR("fail to wrt sys tbl addr to hdr\n");
+ goto hdr_update_fail;
+ }
+
+ tbl_mem_buf = tbl_mem.base;
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
+ continue;
+ res = ipa_generate_rt_hw_rule(ip, entry,
+ tbl_mem_buf);
+ if (res) {
+ IPAERR("failed to gen HW RT rule\n");
+ goto hdr_update_fail;
+ }
+ tbl_mem_buf += entry->hw_len;
+ }
+
+ if (tbl->curr_mem[rlt].phys_base) {
+ WARN_ON(tbl->prev_mem[rlt].phys_base);
+ tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
+ }
+ tbl->curr_mem[rlt] = tbl_mem;
+ } else {
+ offset = body_i - base + body_ofst;
+
+ /* update the hdr at the right index */
+ if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+ tbl->idx - apps_start_idx, true)) {
+ IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+ goto hdr_update_fail;
+ }
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
+ continue;
+ res = ipa_generate_rt_hw_rule(ip, entry,
+ body_i);
+ if (res) {
+ IPAERR("failed to gen HW RT rule\n");
+ goto err;
+ }
+ body_i += entry->hw_len;
+ }
+
+			/*
+			 * Advance body_i to the next table alignment
+			 * boundary, as local tables are ordered
+			 * back-to-back; the HAL alignment value is used
+			 * as a mask (alignment - 1) below.
+			 */
+ body_i += ipahal_get_lcl_tbl_addr_alignment();
+ body_i = (u8 *)((long)body_i &
+ ~ipahal_get_lcl_tbl_addr_alignment());
+ }
+ }
+
+ return 0;
+
+hdr_update_fail:
+ ipahal_free_dma_mem(&tbl_mem);
+err:
+ return -EPERM;
+}
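+
+/*
+ * Sketch of the resulting layout for local (non-sys) tables handled
+ * above: rule bodies are packed back-to-back in the body buffer, each
+ * table starting on an aligned offset, and each offset (relative to
+ * body_ofst) is written into the header via
+ * ipahal_fltrt_write_addr_to_hdr():
+ *
+ *	hdr:  [ofst(tbl0)][ofst(tbl1)] ...
+ *	body: [tbl0 rules][pad][tbl1 rules][pad] ...
+ */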
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+ struct ipa3_rt_tbl *tbl;
+ struct ipa3_rt_tbl *next;
+ struct ipa3_rt_tbl_set *set;
+ int i;
+
+ set = &ipa3_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
+ if (tbl->prev_mem[i].phys_base) {
+ IPADBG_LOW(
+ "reaping sys rt tbl name=%s ip=%d rlt=%d\n",
+ tbl->name, ip, i);
+ ipahal_free_dma_mem(&tbl->prev_mem[i]);
+ memset(&tbl->prev_mem[i], 0,
+ sizeof(tbl->prev_mem[i]));
+ }
+ }
+ }
+
+ set = &ipa3_ctx->reap_rt_tbl_set[ip];
+ list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+ for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
+ WARN_ON(tbl->prev_mem[i].phys_base != 0);
+ if (tbl->curr_mem[i].phys_base) {
+ IPADBG_LOW(
+ "reaping sys rt tbl name=%s ip=%d rlt=%d\n",
+ tbl->name, ip, i);
+ ipahal_free_dma_mem(&tbl->curr_mem[i]);
+ }
+ }
+ list_del(&tbl->link);
+ kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl);
+ }
+}
+
+/**
+ * ipa_prep_rt_tbl_for_cmt() - prepare the rt tbl for commit:
+ * assign priorities to the rules, calculate their sizes, and calculate
+ * the overall table size
+ * @ip: the ip address family type
+ * @tbl: the rt tbl to be prepared
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
+ struct ipa3_rt_tbl *tbl)
+{
+ struct ipa3_rt_entry *entry;
+ int prio_i;
+ int res;
+ int max_prio;
+ u32 hdr_width;
+
+ tbl->sz[IPA_RULE_HASHABLE] = 0;
+ tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
+
+ max_prio = ipahal_get_rule_max_priority();
+
+ prio_i = max_prio;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+
+ if (entry->rule.max_prio) {
+ entry->prio = max_prio;
+ } else {
+ if (ipahal_rule_decrease_priority(&prio_i)) {
+				IPAERR("cannot decrease rule priority - %d\n",
+					prio_i);
+ return -EPERM;
+ }
+ entry->prio = prio_i;
+ }
+
+ res = ipa_generate_rt_hw_rule(ip, entry, NULL);
+ if (res) {
+ IPAERR("failed to calculate HW RT rule size\n");
+ return -EPERM;
+ }
+
+ IPADBG("RT rule id (handle) %d hw_len %u priority %u\n",
+ entry->id, entry->hw_len, entry->prio);
+
+ if (entry->rule.hashable)
+ tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
+ else
+ tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
+ }
+
+ if ((tbl->sz[IPA_RULE_HASHABLE] +
+ tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
+ WARN_ON(1);
+		IPAERR("rt tbl %s has zero total size\n", tbl->name);
+ }
+
+ hdr_width = ipahal_get_hw_tbl_hdr_width();
+
+ if (tbl->sz[IPA_RULE_HASHABLE])
+ tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
+ if (tbl->sz[IPA_RULE_NON_HASHABLE])
+ tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
+
+ IPADBG("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
+ tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
+
+ return 0;
+}
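+
+/*
+ * Worked example of the priority assignment above, assuming
+ * ipahal_rule_decrease_priority() steps the priority down by one: for
+ * rules A (rule.max_prio set), B, C in list order,
+ *
+ *	A->prio == max_prio
+ *	B->prio == max_prio - 1
+ *	C->prio == max_prio - 2
+ *
+ * i.e. max-priority rules share the top priority while the remaining
+ * rules get strictly decreasing priorities in list order.
+ */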
+
+/**
+ * ipa_generate_rt_hw_tbl_img() - generate the rt hw tbl image
+ * Headers and bodies (sys bodies) are created in buffers that will later
+ * be copied into local memory (sram).
+ * @ip: the ip address family type
+ * @alloc_params: IN/OUT parameters holding info regarding the table
+ * headers and bodies on DDR (DMA buffers), and the allocation info
+ * needed by the HAL
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
+ struct ipahal_fltrt_alloc_imgs_params *alloc_params)
+{
+ u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
+ u32 apps_start_idx;
+ int rc = 0;
+
+ if (ip == IPA_IP_v4) {
+ nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_nhash_ofst) -
+ IPA_MEM_PART(v4_rt_nhash_ofst);
+ hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_hash_ofst) -
+ IPA_MEM_PART(v4_rt_hash_ofst);
+ apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
+ } else {
+ nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_nhash_ofst) -
+ IPA_MEM_PART(v6_rt_nhash_ofst);
+ hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_hash_ofst) -
+ IPA_MEM_PART(v6_rt_hash_ofst);
+ apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
+ }
+
+ if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+ IPAERR("fail to allocate RT HW TBL images. IP %d\n", ip);
+ rc = -ENOMEM;
+ goto allocate_fail;
+ }
+
+ if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
+ alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
+ hash_bdy_start_ofst, apps_start_idx)) {
+ IPAERR("fail to translate hashable rt tbls to hw format\n");
+ rc = -EPERM;
+ goto translate_fail;
+ }
+ if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
+ alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
+ nhash_bdy_start_ofst, apps_start_idx)) {
+ IPAERR("fail to translate non-hashable rt tbls to hw format\n");
+ rc = -EPERM;
+ goto translate_fail;
+ }
+
+ return rc;
+
+translate_fail:
+ if (alloc_params->hash_hdr.size)
+ ipahal_free_dma_mem(&alloc_params->hash_hdr);
+ ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+ if (alloc_params->hash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params->hash_bdy);
+ if (alloc_params->nhash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_fail:
+ return rc;
+}
+
+/**
+ * ipa_rt_valid_lcl_tbl_size() - validate that the space allocated for rt tbl
+ * bodies in the sram is enough for the commit
+ * @ipt: the ip address family type
+ * @rlt: the rule type (hashable or non-hashable)
+ * @bdy: the table body buffer whose size is checked against the sram space
+ *
+ * Return: true if enough space is available, false otherwise
+ */
+static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
+ enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
+{
+ u16 avail;
+
+ if (ipt == IPA_IP_v4)
+ avail = (rlt == IPA_RULE_HASHABLE) ?
+ IPA_MEM_PART(apps_v4_rt_hash_size) :
+ IPA_MEM_PART(apps_v4_rt_nhash_size);
+ else
+ avail = (rlt == IPA_RULE_HASHABLE) ?
+ IPA_MEM_PART(apps_v6_rt_hash_size) :
+ IPA_MEM_PART(apps_v6_rt_nhash_size);
+
+ if (bdy->size <= avail)
+ return true;
+
+ IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
+ bdy->size, avail, ipt, rlt);
+ return false;
+}
+
+/**
+ * __ipa_commit_rt_v3() - commit rt tables to the hw
+ * Commit the headers and, if they are local, the bodies, with internal
+ * cache flushing.
+ * @ip: the ip address family type
+ *
+ * Return: 0 on success, negative on failure
+ */
+int __ipa_commit_rt_v3(enum ipa_ip_type ip)
+{
+ struct ipa3_desc desc[5];
+ struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+ struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
+ struct ipahal_imm_cmd_pyld *cmd_pyld[5];
+ int num_cmd = 0;
+ struct ipahal_fltrt_alloc_imgs_params alloc_params;
+ u32 num_modem_rt_index;
+ int rc = 0;
+ u32 lcl_hash_hdr, lcl_nhash_hdr;
+ u32 lcl_hash_bdy, lcl_nhash_bdy;
+ bool lcl_hash, lcl_nhash;
+ struct ipahal_reg_fltrt_hash_flush flush;
+ struct ipahal_reg_valmask valmask;
+ int i;
+ struct ipa3_rt_tbl_set *set;
+ struct ipa3_rt_tbl *tbl;
+ u32 tbl_hdr_width;
+
+ tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
+ memset(desc, 0, sizeof(desc));
+ memset(cmd_pyld, 0, sizeof(cmd_pyld));
+ memset(&alloc_params, 0, sizeof(alloc_params));
+ alloc_params.ipt = ip;
+
+ if (ip == IPA_IP_v4) {
+ num_modem_rt_index =
+ IPA_MEM_PART(v4_modem_rt_index_hi) -
+ IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
+ lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_rt_hash_ofst) +
+ num_modem_rt_index * tbl_hdr_width;
+ lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v4_rt_nhash_ofst) +
+ num_modem_rt_index * tbl_hdr_width;
+ lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v4_rt_hash_ofst);
+ lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v4_rt_nhash_ofst);
+ lcl_hash = ipa3_ctx->ip4_rt_tbl_hash_lcl;
+ lcl_nhash = ipa3_ctx->ip4_rt_tbl_nhash_lcl;
+ alloc_params.tbls_num = IPA_MEM_PART(v4_apps_rt_index_hi) -
+ IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
+ } else {
+ num_modem_rt_index =
+ IPA_MEM_PART(v6_modem_rt_index_hi) -
+ IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
+ lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_rt_hash_ofst) +
+ num_modem_rt_index * tbl_hdr_width;
+ lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(v6_rt_nhash_ofst) +
+ num_modem_rt_index * tbl_hdr_width;
+ lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v6_rt_hash_ofst);
+ lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(apps_v6_rt_nhash_ofst);
+ lcl_hash = ipa3_ctx->ip6_rt_tbl_hash_lcl;
+ lcl_nhash = ipa3_ctx->ip6_rt_tbl_nhash_lcl;
+ alloc_params.tbls_num = IPA_MEM_PART(v6_apps_rt_index_hi) -
+ IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
+ }
+
+ if (!ipa3_ctx->rt_idx_bitmap[ip]) {
+ IPAERR("no rt tbls present\n");
+ rc = -EPERM;
+ goto no_rt_tbls;
+ }
+
+ set = &ipa3_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ if (ipa_prep_rt_tbl_for_cmt(ip, tbl)) {
+ rc = -EPERM;
+ goto no_rt_tbls;
+ }
+ if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+ tbl->sz[IPA_RULE_HASHABLE]) {
+ alloc_params.num_lcl_hash_tbls++;
+ alloc_params.total_sz_lcl_hash_tbls +=
+ tbl->sz[IPA_RULE_HASHABLE];
+ alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+ }
+ if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+ tbl->sz[IPA_RULE_NON_HASHABLE]) {
+ alloc_params.num_lcl_nhash_tbls++;
+ alloc_params.total_sz_lcl_nhash_tbls +=
+ tbl->sz[IPA_RULE_NON_HASHABLE];
+ alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+ }
+ }
+
+ if (ipa_generate_rt_hw_tbl_img(ip, &alloc_params)) {
+ IPAERR("fail to generate RT HW TBL images. IP %d\n", ip);
+ rc = -EFAULT;
+ goto no_rt_tbls;
+ }
+
+ if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+ &alloc_params.hash_bdy)) {
+ rc = -EFAULT;
+ goto fail_size_valid;
+ }
+ if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
+ &alloc_params.nhash_bdy)) {
+ rc = -EFAULT;
+ goto fail_size_valid;
+ }
+
+ /* flushing ipa internal hashable rt rules cache */
+ memset(&flush, 0, sizeof(flush));
+ if (ip == IPA_IP_v4)
+ flush.v4_rt = true;
+ else
+ flush.v6_rt = true;
+ ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+ reg_write_cmd.skip_pipeline_clear = false;
+ reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+ reg_write_cmd.value = valmask.val;
+ reg_write_cmd.value_mask = valmask.mask;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct register_write imm cmd. IP %d\n", ip);
+ goto fail_size_valid;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+ desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ num_cmd++;
+
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = alloc_params.nhash_hdr.size;
+ mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base;
+ mem_cmd.local_addr = lcl_nhash_hdr;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+ desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ num_cmd++;
+
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = alloc_params.hash_hdr.size;
+ mem_cmd.system_addr = alloc_params.hash_hdr.phys_base;
+ mem_cmd.local_addr = lcl_hash_hdr;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+ desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ num_cmd++;
+
+ if (lcl_nhash) {
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = alloc_params.nhash_bdy.size;
+ mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
+ mem_cmd.local_addr = lcl_nhash_bdy;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+ ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+ desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ num_cmd++;
+ }
+ if (lcl_hash) {
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = alloc_params.hash_bdy.size;
+ mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
+ mem_cmd.local_addr = lcl_hash_bdy;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+ ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+ desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ num_cmd++;
+ }
+
+ if (ipa3_send_cmd(num_cmd, desc)) {
+ IPAERR("fail to send immediate command\n");
+ rc = -EFAULT;
+ goto fail_imm_cmd_construct;
+ }
+
+ IPADBG("Hashable HEAD\n");
+ IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+ alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
+
+ IPADBG("Non-Hashable HEAD\n");
+ IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+ alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
+
+ if (alloc_params.hash_bdy.size) {
+ IPADBG("Hashable BODY\n");
+ IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+ alloc_params.hash_bdy.phys_base,
+ alloc_params.hash_bdy.size);
+ }
+
+ if (alloc_params.nhash_bdy.size) {
+ IPADBG("Non-Hashable BODY\n");
+ IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+ alloc_params.nhash_bdy.phys_base,
+ alloc_params.nhash_bdy.size);
+ }
+
+ __ipa_reap_sys_rt_tbls(ip);
+
+fail_imm_cmd_construct:
+ for (i = 0 ; i < num_cmd ; i++)
+ ipahal_destroy_imm_cmd(cmd_pyld[i]);
+fail_size_valid:
+ if (alloc_params.hash_hdr.size)
+ ipahal_free_dma_mem(&alloc_params.hash_hdr);
+ ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+ if (alloc_params.hash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params.hash_bdy);
+ if (alloc_params.nhash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+
+no_rt_tbls:
+ return rc;
+}
+
+/**
+ * __ipa3_find_rt_tbl() - find the routing table
+ *  whose name is given as a parameter
+ * @ip: [in] the IP address family type of the wanted routing table
+ * @name: [in] the name of the wanted routing table
+ *
+ * Returns: the routing table whose name is given as a parameter, or NULL if
+ * it doesn't exist
+ */
+struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+ struct ipa3_rt_tbl *entry;
+ struct ipa3_rt_tbl_set *set;
+
+ if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+ IPAERR("Name too long: %s\n", name);
+ return NULL;
+ }
+
+ set = &ipa3_ctx->rt_tbl_set[ip];
+ list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+ if (!strcmp(name, entry->name))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * ipa3_query_rt_index() - find the index of the routing table
+ *  whose name and IP type are given as parameters
+ * @in: [inout] the lookup parameters; on success in->idx is set to the
+ *  index of the matching routing table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+ struct ipa3_rt_tbl *entry;
+
+ if (in->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ /* check if this table exists */
+ entry = __ipa3_find_rt_tbl(in->ip, in->name);
+ if (!entry)
+ return -EFAULT;
+
+ in->idx = entry->idx;
+ return 0;
+}
+
+static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+ const char *name)
+{
+ struct ipa3_rt_tbl *entry;
+ struct ipa3_rt_tbl_set *set;
+ int i;
+ int id;
+ int max_tbl_indx;
+
+ if (name == NULL) {
+ IPAERR("no tbl name\n");
+ goto error;
+ }
+
+ if (ip == IPA_IP_v4) {
+ max_tbl_indx =
+ max(IPA_MEM_PART(v4_modem_rt_index_hi),
+ IPA_MEM_PART(v4_apps_rt_index_hi));
+ } else if (ip == IPA_IP_v6) {
+ max_tbl_indx =
+ max(IPA_MEM_PART(v6_modem_rt_index_hi),
+ IPA_MEM_PART(v6_apps_rt_index_hi));
+ } else {
+ IPAERR("bad ip family type\n");
+ goto error;
+ }
+
+ set = &ipa3_ctx->rt_tbl_set[ip];
+ /* check if this table exists */
+ entry = __ipa3_find_rt_tbl(ip, name);
+ if (!entry) {
+ entry = kmem_cache_zalloc(ipa3_ctx->rt_tbl_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc RT tbl object\n");
+ goto error;
+ }
+ /* find a routing tbl index */
+ for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+ if (!test_bit(i, &ipa3_ctx->rt_idx_bitmap[ip])) {
+ entry->idx = i;
+ set_bit(i, &ipa3_ctx->rt_idx_bitmap[ip]);
+ break;
+ }
+ }
+ if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+			IPAERR("no free RT tbl indices left\n");
+ goto fail_rt_idx_alloc;
+ }
+ if (i > max_tbl_indx) {
+ IPAERR("rt tbl index is above max\n");
+ goto fail_rt_idx_alloc;
+ }
+
+ INIT_LIST_HEAD(&entry->head_rt_rule_list);
+ INIT_LIST_HEAD(&entry->link);
+ strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+ entry->set = set;
+ entry->cookie = IPA_COOKIE;
+ entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ?
+ !ipa3_ctx->ip4_rt_tbl_hash_lcl :
+ !ipa3_ctx->ip6_rt_tbl_hash_lcl;
+ entry->in_sys[IPA_RULE_NON_HASHABLE] = (ip == IPA_IP_v4) ?
+ !ipa3_ctx->ip4_rt_tbl_nhash_lcl :
+ !ipa3_ctx->ip6_rt_tbl_nhash_lcl;
+ set->tbl_cnt++;
+ idr_init(&entry->rule_ids);
+ list_add(&entry->link, &set->head_rt_tbl_list);
+
+ IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+ set->tbl_cnt, ip);
+
+ id = ipa3_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+ entry->id = id;
+ }
+
+ return entry;
+
+fail_rt_idx_alloc:
+ entry->cookie = 0;
+ kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
+error:
+ return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
+{
+ enum ipa_ip_type ip = IPA_IP_MAX;
+ u32 id;
+ struct ipa3_rt_tbl_set *rset;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad parms\n");
+ return -EINVAL;
+ }
+ id = entry->id;
+ if (ipa3_id_find(id) == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ rset = &ipa3_ctx->reap_rt_tbl_set[ip];
+
+ idr_destroy(&entry->rule_ids);
+ if (entry->in_sys[IPA_RULE_HASHABLE] ||
+ entry->in_sys[IPA_RULE_NON_HASHABLE]) {
+ list_move(&entry->link, &rset->head_rt_tbl_list);
+ clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d ip=%d\n",
+ entry->idx, entry->set->tbl_cnt, ip);
+ } else {
+ list_del(&entry->link);
+ clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del rt tbl_idx=%d tbl_cnt=%d ip=%d\n",
+ entry->idx, entry->set->tbl_cnt, ip);
+ kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
+ }
+
+ /* remove the handle from the database */
+ ipa3_id_remove(id);
+ return 0;
+}
+
+static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
+ struct ipa3_hdr_entry **hdr,
+ struct ipa3_hdr_proc_ctx_entry **proc_ctx)
+{
+ if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
+ IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
+ return -EPERM;
+ }
+
+ if (rule->hdr_hdl) {
+ *hdr = ipa3_id_find(rule->hdr_hdl);
+ if ((*hdr == NULL) || ((*hdr)->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid hdr\n");
+ return -EPERM;
+ }
+ } else if (rule->hdr_proc_ctx_hdl) {
+ *proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl);
+ if ((*proc_ctx == NULL) ||
+ ((*proc_ctx)->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid proc ctx\n");
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
+ const struct ipa_rt_rule *rule,
+ struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx)
+{
+ int id;
+
+ *entry = kmem_cache_zalloc(ipa3_ctx->rt_rule_cache, GFP_KERNEL);
+ if (!*entry) {
+ IPAERR("failed to alloc RT rule object\n");
+ goto error;
+ }
+ INIT_LIST_HEAD(&(*entry)->link);
+ (*(entry))->cookie = IPA_COOKIE;
+ (*(entry))->rule = *rule;
+ (*(entry))->tbl = tbl;
+ (*(entry))->hdr = hdr;
+ (*(entry))->proc_ctx = proc_ctx;
+ id = ipa3_alloc_rule_id(&tbl->rule_ids);
+ if (id < 0) {
+ IPAERR("failed to allocate rule id\n");
+ WARN_ON(1);
+ goto alloc_rule_id_fail;
+ }
+ (*(entry))->rule_id = id;
+
+ return 0;
+
+alloc_rule_id_fail:
+ kmem_cache_free(ipa3_ctx->rt_rule_cache, *entry);
+error:
+ return -EPERM;
+}
+
+static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
+ struct ipa3_rt_tbl *tbl)
+{
+ int id;
+
+ tbl->rule_cnt++;
+ if (entry->hdr)
+ entry->hdr->ref_cnt++;
+ else if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt++;
+ id = ipa3_id_alloc(entry);
+ if (id < 0) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ goto ipa_insert_failed;
+ }
+ IPADBG("add rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
+ tbl->idx, tbl->rule_cnt, entry->rule_id);
+ *rule_hdl = id;
+ entry->id = id;
+
+ return 0;
+
+ipa_insert_failed:
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ else if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt--;
+ idr_remove(&tbl->rule_ids, entry->rule_id);
+ list_del(&entry->link);
+ kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
+ return -EPERM;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+ const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+ struct ipa3_rt_tbl *tbl;
+ struct ipa3_rt_entry *entry;
+ struct ipa3_hdr_entry *hdr = NULL;
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+ if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
+ goto error;
+
+ tbl = __ipa_add_rt_tbl(ip, name);
+ if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ IPAERR("failed adding rt tbl name = %s\n",
+ name ? name : "");
+ goto error;
+ }
+ /*
+ * do not allow any rules to be added at end of the "default" routing
+ * tables
+ */
+ if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+ (tbl->rule_cnt > 0) && (at_rear != 0)) {
+ IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+ tbl->rule_cnt, at_rear);
+ goto error;
+ }
+
+ if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+ goto error;
+
+ if (at_rear)
+ list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+ else
+ list_add(&entry->link, &tbl->head_rt_rule_list);
+
+ if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl))
+ goto error;
+
+ return 0;
+
+error:
+ return -EPERM;
+}
+
+static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl,
+ const struct ipa_rt_rule *rule, u32 *rule_hdl,
+ struct ipa3_rt_entry **add_after_entry)
+{
+ struct ipa3_rt_entry *entry;
+ struct ipa3_hdr_entry *hdr = NULL;
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+ if (!*add_after_entry)
+ goto error;
+
+ if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
+ goto error;
+
+ if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+ goto error;
+
+ list_add(&entry->link, &((*add_after_entry)->link));
+
+ if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl))
+ goto error;
+
+ /*
+ * prepare for next insertion
+ */
+ *add_after_entry = entry;
+
+ return 0;
+
+error:
+ *add_after_entry = NULL;
+ return -EPERM;
+}
+
+/**
+ * ipa3_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+ int i;
+ int ret;
+
+ if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].rt_rule_hdl)) {
+ IPAERR("failed to add rt rule %d\n", i);
+ rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return ret;
+}
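+
+/*
+ * Usage sketch (illustrative only, not part of this driver): adding a
+ * single IPv4 rule at the rear of a named table. The per-rule element
+ * type is assumed to be struct ipa_rt_rule_add from the msm_ipa UAPI
+ * header; error handling is abbreviated.
+ *
+ *	struct ipa_ioc_add_rt_rule *req;
+ *	size_t sz = sizeof(*req) + sizeof(struct ipa_rt_rule_add);
+ *
+ *	req = kzalloc(sz, GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	strlcpy(req->rt_tbl_name, "example_tbl", IPA_RESOURCE_NAME_MAX);
+ *	req->num_rules = 1;
+ *	req->rules[0].at_rear = 1;
+ *	if (ipa3_add_rt_rule(req) || req->rules[0].status)
+ *		IPAERR("add rt rule failed\n");
+ *	kfree(req);
+ */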
+
+/**
+ * ipa3_add_rt_rule_after() - Add the given routing rules after the
+ * specified rule to SW and optionally commit to IPA HW
+ * @rules: [inout] set of routing rules to add + handle where to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
+{
+ int i;
+ int ret = 0;
+ struct ipa3_rt_tbl *tbl = NULL;
+ struct ipa3_rt_entry *entry = NULL;
+
+ if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+
+ tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
+ if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ IPAERR("failed finding rt tbl name = %s\n",
+ rules->rt_tbl_name ? rules->rt_tbl_name : "");
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (tbl->rule_cnt <= 0) {
+		IPAERR("tbl->rule_cnt <= 0\n");
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ entry = ipa3_id_find(rules->add_after_hdl);
+ if (!entry) {
+ IPAERR("failed finding rule %d in rt tbls\n",
+ rules->add_after_hdl);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (entry->tbl != tbl) {
+ IPAERR("given rt rule does not match the table\n");
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * do not allow any rules to be added at end of the "default" routing
+ * tables
+ */
+ if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+ (&entry->link == tbl->head_rt_rule_list.prev)) {
+ IPAERR("cannot add rule at end of tbl rule_cnt=%d\n",
+ tbl->rule_cnt);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+	 * Rules are added one after the other. If one insertion fails, it cuts
+	 * the chain: all following rules receive a fail status, since the
+	 * subsequent calls to __ipa_add_rt_rule_after fail (entry == NULL).
+ */
+
+ for (i = 0; i < rules->num_rules; i++) {
+ if (__ipa_add_rt_rule_after(tbl,
+ &rules->rules[i].rule,
+ &rules->rules[i].rt_rule_hdl,
+ &entry)) {
+ IPAERR("failed to add rt rule %d\n", i);
+ rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+ IPAERR("failed to commit\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return ret;
+}
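+
+/*
+ * Usage sketch (illustrative only): inserting two rules after an existing
+ * one. The anchor advances with every successful insertion, so the rules
+ * land in the order given; anchor_rule_hdl stands for a handle previously
+ * returned in rt_rule_hdl, and req is allocated with the same
+ * variable-length pattern as ipa_ioc_add_rt_rule.
+ *
+ *	req->ip = IPA_IP_v4;
+ *	strlcpy(req->rt_tbl_name, "example_tbl", IPA_RESOURCE_NAME_MAX);
+ *	req->add_after_hdl = anchor_rule_hdl;
+ *	req->num_rules = 2;
+ *	req->commit = 1;
+ *	if (ipa3_add_rt_rule_after(req))
+ *		IPAERR("add rt rule after failed\n");
+ */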
+
+int __ipa3_del_rt_rule(u32 rule_hdl)
+{
+ struct ipa3_rt_entry *entry;
+ int id;
+
+ entry = ipa3_id_find(rule_hdl);
+
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ return -EINVAL;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("bad params\n");
+ return -EINVAL;
+ }
+
+ if (entry->hdr)
+ __ipa3_release_hdr(entry->hdr->id);
+ else if (entry->proc_ctx)
+ __ipa3_release_hdr_proc_ctx(entry->proc_ctx->id);
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
+ entry->tbl->idx, entry->tbl->rule_cnt, entry->rule_id);
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry->tbl))
+ IPAERR("fail to del RT tbl\n");
+ }
+ entry->cookie = 0;
+ id = entry->id;
+ kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ ipa3_id_remove(id);
+
+ return 0;
+}
+
+/**
+ * ipa3_del_rt_rule() - Remove the specified routing rules from SW and
+ * optionally
+ * commit to IPA HW
+ * @hdls: [inout] set of routing rules to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+ int i;
+ int ret;
+
+ if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del rt rule %i\n", i);
+ hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return ret;
+}
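+
+/*
+ * Usage sketch (illustrative only): deleting a rule by the handle returned
+ * in rt_rule_hdl. The element type of hdl[] is assumed to be
+ * struct ipa_rt_rule_del from the msm_ipa UAPI header.
+ *
+ *	struct ipa_ioc_del_rt_rule *req;
+ *	size_t sz = sizeof(*req) + sizeof(struct ipa_rt_rule_del);
+ *
+ *	req = kzalloc(sz, GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	req->num_hdls = 1;
+ *	req->hdl[0].hdl = saved_rule_hdl;
+ *	if (ipa3_del_rt_rule(req) || req->hdl[0].status)
+ *		IPAERR("del rt rule failed\n");
+ *	kfree(req);
+ */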
+
+/**
+ * ipa3_commit_rt() - Commit the current SW routing table of the specified type
+ * to IPA HW
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_commit_rt(enum ipa_ip_type ip)
+{
+ int ret;
+
+ if (ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ /*
+ * issue a commit on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa3_commit_flt(ip))
+ return -EPERM;
+
+ mutex_lock(&ipa3_ctx->lock);
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return ret;
+}
+
+/**
+ * ipa3_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_reset_rt(enum ipa_ip_type ip)
+{
+ struct ipa3_rt_tbl *tbl;
+ struct ipa3_rt_tbl *tbl_next;
+ struct ipa3_rt_tbl_set *set;
+ struct ipa3_rt_entry *rule;
+ struct ipa3_rt_entry *rule_next;
+ struct ipa3_rt_tbl_set *rset;
+ u32 apps_start_idx;
+ int id;
+
+ if (ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ if (ip == IPA_IP_v4)
+ apps_start_idx =
+ IPA_MEM_PART(v4_apps_rt_index_lo);
+ else
+ apps_start_idx =
+ IPA_MEM_PART(v6_apps_rt_index_lo);
+
+ /*
+ * issue a reset on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa3_reset_flt(ip))
+ IPAERR("fail to reset flt ip=%d\n", ip);
+
+ set = &ipa3_ctx->rt_tbl_set[ip];
+ rset = &ipa3_ctx->reap_rt_tbl_set[ip];
+ mutex_lock(&ipa3_ctx->lock);
+ IPADBG("reset rt ip=%d\n", ip);
+ list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+ list_for_each_entry_safe(rule, rule_next,
+ &tbl->head_rt_rule_list, link) {
+ if (ipa3_id_find(rule->id) == NULL) {
+ WARN_ON(1);
+ mutex_unlock(&ipa3_ctx->lock);
+ return -EFAULT;
+ }
+
+ /*
+ * for the "default" routing tbl, remove all but the
+ * last rule
+ */
+ if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
+ continue;
+
+ list_del(&rule->link);
+ tbl->rule_cnt--;
+ if (rule->hdr)
+ __ipa3_release_hdr(rule->hdr->id);
+ else if (rule->proc_ctx)
+ __ipa3_release_hdr_proc_ctx(rule->proc_ctx->id);
+ rule->cookie = 0;
+ idr_remove(&tbl->rule_ids, rule->rule_id);
+ id = rule->id;
+ kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
+
+ /* remove the handle from the database */
+ ipa3_id_remove(id);
+ }
+
+ if (ipa3_id_find(tbl->id) == NULL) {
+ WARN_ON(1);
+ mutex_unlock(&ipa3_ctx->lock);
+ return -EFAULT;
+ }
+ id = tbl->id;
+
+ /* do not remove the "default" routing tbl which has index 0 */
+ if (tbl->idx != apps_start_idx) {
+ idr_destroy(&tbl->rule_ids);
+ if (tbl->in_sys[IPA_RULE_HASHABLE] ||
+ tbl->in_sys[IPA_RULE_NON_HASHABLE]) {
+ list_move(&tbl->link, &rset->head_rt_tbl_list);
+ clear_bit(tbl->idx,
+ &ipa3_ctx->rt_idx_bitmap[ip]);
+ set->tbl_cnt--;
+ IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ } else {
+ list_del(&tbl->link);
+ set->tbl_cnt--;
+ clear_bit(tbl->idx,
+ &ipa3_ctx->rt_idx_bitmap[ip]);
+ IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl);
+ }
+ /* remove the handle from the database */
+ ipa3_id_remove(id);
+ }
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return 0;
+}
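+
+/*
+ * Usage sketch (illustrative only): a full SW reset followed by an
+ * explicit commit so that the HW tables match the emptied SW state.
+ *
+ *	if (ipa3_reset_rt(IPA_IP_v4))
+ *		IPAERR("fail to reset v4 rt tbls\n");
+ *	else if (ipa3_commit_rt(IPA_IP_v4))
+ *		IPAERR("fail to commit v4 rt tbls\n");
+ */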
+
+/**
+ * ipa3_get_rt_tbl() - look up the specified routing table and return a handle
+ * if it exists; if the lookup succeeds, the routing table ref cnt is increased
+ * @lookup: [inout] routing table to lookup and its handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa3_put_rt_tbl later if this function succeeds
+ */
+int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+ struct ipa3_rt_tbl *entry;
+ int result = -EFAULT;
+
+ if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa3_ctx->lock);
+ entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
+ if (entry && entry->cookie == IPA_COOKIE) {
+ entry->ref_cnt++;
+ lookup->hdl = entry->id;
+
+ /* commit for get */
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip))
+ IPAERR("fail to commit RT tbl\n");
+
+ result = 0;
+ }
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
+
+/**
+ * ipa3_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl: [in] the routing table handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
+{
+ struct ipa3_rt_tbl *entry;
+ enum ipa_ip_type ip = IPA_IP_MAX;
+ int result;
+
+ mutex_lock(&ipa3_ctx->lock);
+ entry = ipa3_id_find(rt_tbl_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ result = -EINVAL;
+ goto ret;
+ }
+
+ if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR("bad parms\n");
+ result = -EINVAL;
+ goto ret;
+ }
+
+ if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ entry->ref_cnt--;
+ if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry))
+ IPAERR("fail to del RT tbl\n");
+ /* commit for put */
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(ip))
+ IPAERR("fail to commit RT tbl\n");
+ }
+
+ result = 0;
+
+ret:
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
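+
+/*
+ * Usage sketch (illustrative only): a get/put pair around a table lookup.
+ * The handle returned in lookup.hdl stays valid until the matching
+ * ipa3_put_rt_tbl() call.
+ *
+ *	struct ipa_ioc_get_rt_tbl lookup;
+ *
+ *	memset(&lookup, 0, sizeof(lookup));
+ *	lookup.ip = IPA_IP_v4;
+ *	strlcpy(lookup.name, "example_tbl", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa3_get_rt_tbl(&lookup)) {
+ *		. . . use lookup.hdl while holding the reference . . .
+ *		ipa3_put_rt_tbl(lookup.hdl);
+ *	}
+ */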
+
+static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
+{
+ struct ipa3_rt_entry *entry;
+ struct ipa3_hdr_entry *hdr = NULL;
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+ if (rtrule->rule.hdr_hdl) {
+ hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
+ if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid hdr\n");
+ goto error;
+ }
+ } else if (rtrule->rule.hdr_proc_ctx_hdl) {
+ proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl);
+ if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid proc ctx\n");
+ goto error;
+ }
+ }
+
+ entry = ipa3_id_find(rtrule->rt_rule_hdl);
+ if (entry == NULL) {
+ IPAERR("lookup failed\n");
+ goto error;
+ }
+
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("bad params\n");
+ goto error;
+ }
+
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt--;
+
+ entry->rule = rtrule->rule;
+ entry->hdr = hdr;
+ entry->proc_ctx = proc_ctx;
+
+ if (entry->hdr)
+ entry->hdr->ref_cnt++;
+ if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt++;
+
+ entry->hw_len = 0;
+ entry->prio = 0;
+
+ return 0;
+
+error:
+ return -EPERM;
+}
+
+/**
+ * ipa3_mdfy_rt_rule() - Modify the specified routing rules in SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of routing rules to modify
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
+{
+ int i;
+ int result;
+
+ if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < hdls->num_rules; i++) {
+ if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
+ IPAERR("failed to mdfy rt rule %i\n", i);
+ hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
+ } else {
+ hdls->rules[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+
+ return result;
+}
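+
+/*
+ * Usage sketch (illustrative only): modifying a previously added rule in
+ * place via its handle. new_rule stands for an updated struct ipa_rt_rule;
+ * req is allocated with the same variable-length pattern as above.
+ *
+ *	req->ip = IPA_IP_v4;
+ *	req->num_rules = 1;
+ *	req->commit = 1;
+ *	req->rules[0].rt_rule_hdl = saved_rule_hdl;
+ *	req->rules[0].rule = new_rule;
+ *	if (ipa3_mdfy_rt_rule(req) || req->rules[0].status)
+ *		IPAERR("mdfy rt rule failed\n");
+ */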
+
+/**
+ * ipa3_set_rt_tuple_mask() - Sets the rt tuple masking for the given tbl.
+ *  The table index must be for an AP EP (not modem).
+ *  Updates the routing masking values without changing the flt ones.
+ *
+ * @tbl_idx: routing table index to configure the tuple masking
+ * @tuple: the tuple members masking
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple)
+{
+ struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
+
+ if (!tuple) {
+ IPAERR("bad tuple\n");
+ return -EINVAL;
+ }
+
+ if (tbl_idx >=
+ max(IPA_MEM_PART(v6_rt_num_index),
+ IPA_MEM_PART(v4_rt_num_index)) ||
+ tbl_idx < 0) {
+ IPAERR("bad table index\n");
+ return -EINVAL;
+ }
+
+ if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
+ tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi)) {
+ IPAERR("cannot configure modem v4 rt tuple by AP\n");
+ return -EINVAL;
+ }
+
+ if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
+ tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi)) {
+ IPAERR("cannot configure modem v6 rt tuple by AP\n");
+ return -EINVAL;
+ }
+
+ ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ tbl_idx, &fltrt_tuple);
+ fltrt_tuple.rt = *tuple;
+ ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ tbl_idx, &fltrt_tuple);
+
+ return 0;
+}
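+
+/*
+ * Usage sketch (illustrative only): clearing the rt hash tuple for an
+ * AP-owned table index, assuming a zeroed ipahal_reg_hash_tuple disables
+ * hashing on every field.
+ *
+ *	struct ipahal_reg_hash_tuple tuple;
+ *
+ *	memset(&tuple, 0, sizeof(tuple));
+ *	if (ipa3_set_rt_tuple_mask(ap_tbl_idx, &tuple))
+ *		IPAERR("fail to set rt tuple mask\n");
+ */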
+
+/**
+ * ipa3_rt_read_tbl_from_hw() - Read a routing table from IPA HW
+ * @tbl_idx: routing table index
+ * @ip_type: IPv4 or IPv6 table
+ * @hashable: hashable or non-hashable table
+ * @entry: array to fill the table entries
+ * @num_entry: number of entries in entry array. set by the caller to indicate
+ * entry array size. Then set by this function as an output parameter to
+ * indicate the number of entries in the array
+ *
+ * This function reads the routing table from IPA SRAM and prepares an array
+ * of entries. This function is mainly used for debugging purposes.
+ *
+ * For an empty table or a modem table in system memory, zero entries will be
+ * returned.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
+ bool hashable, struct ipahal_rt_rule_entry entry[], int *num_entry)
+{
+ void *ipa_sram_mmio;
+ u64 hdr_base_ofst;
+ int res = 0;
+ u64 tbl_addr;
+ bool is_sys;
+ struct ipa_mem_buffer *sys_tbl_mem;
+ u8 *rule_addr;
+ int rule_idx;
+
+ IPADBG("tbl_idx=%d ip_type=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+ tbl_idx, ip_type, hashable, entry, num_entry);
+
+ if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) {
+ IPAERR("Invalid params\n");
+ return -EFAULT;
+ }
+
+ if (ip_type == IPA_IP_v6 && tbl_idx >= IPA_MEM_PART(v6_rt_num_index)) {
+ IPAERR("Invalid params\n");
+ return -EFAULT;
+ }
+
+ /* map IPA SRAM */
+ ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+ ipa3_ctx->smem_restricted_bytes / 4),
+ ipa3_ctx->smem_sz);
+ if (!ipa_sram_mmio) {
+ IPAERR("fail to ioremap IPA SRAM\n");
+ return -ENOMEM;
+ }
+
+ memset(entry, 0, sizeof(*entry) * (*num_entry));
+ if (hashable) {
+ if (ip_type == IPA_IP_v4)
+ hdr_base_ofst =
+ IPA_MEM_PART(v4_rt_hash_ofst);
+ else
+ hdr_base_ofst =
+ IPA_MEM_PART(v6_rt_hash_ofst);
+ } else {
+ if (ip_type == IPA_IP_v4)
+ hdr_base_ofst =
+ IPA_MEM_PART(v4_rt_nhash_ofst);
+ else
+ hdr_base_ofst =
+ IPA_MEM_PART(v6_rt_nhash_ofst);
+ }
+
+ IPADBG("hdr_base_ofst=0x%llx\n", hdr_base_ofst);
+
+ res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+ tbl_idx, &tbl_addr, &is_sys);
+ if (res) {
+ IPAERR("failed to read table address from header structure\n");
+ goto bail;
+ }
+ IPADBG("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n",
+ tbl_idx, tbl_addr, is_sys);
+ if (!tbl_addr) {
+ IPAERR("invalid rt tbl addr\n");
+ res = -EFAULT;
+ goto bail;
+ }
+
+ /* for tables which reside in DDR access it from the virtual memory */
+ if (is_sys) {
+ struct ipa3_rt_tbl_set *set;
+ struct ipa3_rt_tbl *tbl;
+
+ set = &ipa3_ctx->rt_tbl_set[ip_type];
+ rule_addr = NULL;
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ if (tbl->idx == tbl_idx) {
+ sys_tbl_mem = &(tbl->curr_mem[hashable ?
+ IPA_RULE_HASHABLE :
+ IPA_RULE_NON_HASHABLE]);
+ if (sys_tbl_mem->phys_base &&
+ sys_tbl_mem->phys_base != tbl_addr) {
+ IPAERR("mismatch:parsed=%llx sw=%pad\n"
+ , tbl_addr,
+ &sys_tbl_mem->phys_base);
+ }
+ if (sys_tbl_mem->phys_base)
+ rule_addr = sys_tbl_mem->base;
+ else
+ rule_addr = NULL;
+ }
+ }
+ } else {
+ rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
+ }
+
+ IPADBG("First rule addr 0x%p\n", rule_addr);
+
+ if (!rule_addr) {
+ /* Modem table in system memory or empty table */
+ *num_entry = 0;
+ goto bail;
+ }
+
+ rule_idx = 0;
+ while (rule_idx < *num_entry) {
+ res = ipahal_rt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+ if (res) {
+ IPAERR("failed parsing rt rule\n");
+ goto bail;
+ }
+
+ IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+ if (!entry[rule_idx].rule_size)
+ break;
+
+ rule_addr += entry[rule_idx].rule_size;
+ rule_idx++;
+ }
+ *num_entry = rule_idx;
+bail:
+ iounmap(ipa_sram_mmio);
+ return res;
+}
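+
+/*
+ * Usage sketch (illustrative only, debug path): reading back up to ten
+ * rules of the non-hashable IPv4 table at tbl_idx.
+ *
+ *	struct ipahal_rt_rule_entry entry[10];
+ *	int num = ARRAY_SIZE(entry);
+ *
+ *	if (!ipa3_rt_read_tbl_from_hw(tbl_idx, IPA_IP_v4, false,
+ *		entry, &num))
+ *		IPADBG("read %d rt rules from HW\n", num);
+ */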
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
new file mode 100644
index 0000000..b67899b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ipa
+#define TRACE_INCLUDE_FILE ipa_trace
+
+#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+ intr_to_poll3,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
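+
+/*
+ * Each TRACE_EVENT in this file expands to a trace_<name>() inline that
+ * call sites invoke directly, e.g. (the client expression is hypothetical;
+ * any unsigned long client identifier works):
+ *
+ *	trace_intr_to_poll3(sys->ep->client);
+ */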
+
+TRACE_EVENT(
+ poll_to_intr3,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ idle_sleep_enter3,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ idle_sleep_exit3,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netifni3,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netifrx3,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netif_rcv_skb3,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+#endif /* _IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
new file mode 100644
index 0000000..780a005
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -0,0 +1,991 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/delay.h>
+
+#define IPA_RAM_UC_SMEM_SIZE 128
+#define IPA_HW_INTERFACE_VERSION 0x2000
+#define IPA_PKT_FLUSH_TO_US 100
+#define IPA_UC_POLL_SLEEP_USEC 100
+#define IPA_UC_POLL_MAX_RETRY 10000
+
+/**
+ * Mailbox register used to interrupt the HWP for a CPU cmd.
+ * The IPA_UC_MAILBOX_m_n doorbell is used instead of IPA_IRQ_EE_UC_0
+ * due to a HW limitation.
+ */
+#define IPA_CPU_2_HW_CMD_MBOX_m 0
+#define IPA_CPU_2_HW_CMD_MBOX_n 23
+
+/**
+ * enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU
+ * IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior
+ * of HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information.
+ * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal
+ * handling.
+ * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to go to the Clock Gated state.
+ * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to go to the Clock Ungated
+ * state.
+ * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB.
+ * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
+ * IPA_CPU_2_HW_CMD_REG_WRITE : Command to write a value to an IPA register.
+ * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness.
+ */
+enum ipa3_cpu_2_hw_commands {
+ IPA_CPU_2_HW_CMD_NO_OP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_CPU_2_HW_CMD_UPDATE_FLAGS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_CPU_2_HW_CMD_DEBUG_GET_INFO =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+ IPA_CPU_2_HW_CMD_ERR_FATAL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+ IPA_CPU_2_HW_CMD_CLK_GATE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+ IPA_CPU_2_HW_CMD_CLK_UNGATE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+ IPA_CPU_2_HW_CMD_MEMCPY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+ IPA_CPU_2_HW_CMD_RESET_PIPE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
+ IPA_CPU_2_HW_CMD_REG_WRITE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
+ IPA_CPU_2_HW_CMD_GSI_CH_EMPTY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10),
+};
+
+/**
+ * enum ipa3_hw_2_cpu_responses - Values that represent common HW responses
+ * to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_NO_OP : No operation response
+ * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once
+ * boot sequence is completed and HW is ready to serve commands from CPU
+ * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands
+ * @IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO : Response to
+ * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO command
+ */
+enum ipa3_hw_2_cpu_responses {
+ IPA_HW_2_CPU_RESPONSE_NO_OP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+};
+
+/**
+ * struct IpaHwMemCopyData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_MEMCPY command.
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+struct IpaHwMemCopyData_t {
+ u32 destination_addr;
+ u32 source_addr;
+ u32 dest_buffer_size;
+ u32 source_buffer_size;
+};
+
+/**
+ * union IpaHwResetPipeCmdData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_RESET_PIPE command.
+ * @pipeNum : Pipe number to be reset
+ * @direction : 1 - IPA Producer, 0 - IPA Consumer
+ * @reserved_02_03 : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwResetPipeCmdData_t {
+ struct IpaHwResetPipeCmdParams_t {
+ u8 pipeNum;
+ u8 direction;
+ u32 reserved_02_03;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * struct IpaHwRegWriteCmdData_t - holds the parameters for
+ * IPA_CPU_2_HW_CMD_REG_WRITE command. Parameters are
+ * sent as 64b immediate parameters.
+ * @RegisterAddress: RG10 register address where the value needs to be written
+ * @RegisterValue: 32-Bit value to be written into the register
+ */
+struct IpaHwRegWriteCmdData_t {
+ u32 RegisterAddress;
+ u32 RegisterValue;
+};
+
+/**
+ * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters
+ * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response.
+ * @originalCmdOp : The original command opcode
+ * @status : 0 for success indication, otherwise failure
+ * @reserved : Reserved
+ *
+ * Parameters are sent as 32b immediate parameters.
+ */
+union IpaHwCpuCmdCompletedResponseData_t {
+ struct IpaHwCpuCmdCompletedResponseParams_t {
+ u32 originalCmdOp:8;
+ u32 status:8;
+ u32 reserved:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command
+ * @newFlags: SW flags that define the behavior of HW.
+ * This field is expected to be used as bitmask for enum ipa3_hw_flags
+ */
+union IpaHwUpdateFlagsCmdData_t {
+ struct IpaHwUpdateFlagsCmdParams_t {
+ u32 newFlags;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * union IpaHwChkChEmptyCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY command. Parameters are sent as 32b
+ * immediate parameters.
+ * @ee_n : EE owner of the channel
+ * @vir_ch_id : GSI virtual channel ID of the channel to be checked for
+ * emptiness
+ * @reserved_02_04 : Reserved
+ */
+union IpaHwChkChEmptyCmdData_t {
+ struct IpaHwChkChEmptyCmdParams_t {
+ u8 ee_n;
+ u8 vir_ch_id;
+ u16 reserved_02_04;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * When the resource group 10 limitation mitigation is enabled, the uC
+ * send-cmd path must be able to run in interrupt context, so a spinlock
+ * is used instead of a mutex.
+ */
+#define IPA3_UC_LOCK(flags) \
+do { \
+ if (ipa3_ctx->apply_rg10_wa) \
+ spin_lock_irqsave(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \
+ else \
+ mutex_lock(&ipa3_ctx->uc_ctx.uc_lock); \
+} while (0)
+
+#define IPA3_UC_UNLOCK(flags) \
+do { \
+ if (ipa3_ctx->apply_rg10_wa) \
+ spin_unlock_irqrestore(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \
+ else \
+ mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock); \
+} while (0)
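+
+/*
+ * Usage sketch: callers declare the irq-save cookie even on the mutex
+ * path, since the macro decides at runtime which primitive to use.
+ *
+ *	unsigned long flags;
+ *
+ *	IPA3_UC_LOCK(flags);
+ *	. . . access uc_ctx shared state . . .
+ *	IPA3_UC_UNLOCK(flags);
+ */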
+
+struct ipa3_uc_hdlrs ipa3_uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } };
+
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
+{
+ const char *str;
+
+ switch (err_type) {
+ case IPA_HW_ERROR_NONE:
+ str = "IPA_HW_ERROR_NONE";
+ break;
+ case IPA_HW_INVALID_DOORBELL_ERROR:
+ str = "IPA_HW_INVALID_DOORBELL_ERROR";
+ break;
+ case IPA_HW_DMA_ERROR:
+ str = "IPA_HW_DMA_ERROR";
+ break;
+ case IPA_HW_FATAL_SYSTEM_ERROR:
+ str = "IPA_HW_FATAL_SYSTEM_ERROR";
+ break;
+ case IPA_HW_INVALID_OPCODE:
+ str = "IPA_HW_INVALID_OPCODE";
+ break;
+ case IPA_HW_INVALID_PARAMS:
+ str = "IPA_HW_INVALID_PARAMS";
+ break;
+ case IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE:
+ str = "IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE";
+ break;
+ case IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE:
+ str = "IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE";
+ break;
+ case IPA_HW_GSI_CH_NOT_EMPTY_FAILURE:
+ str = "IPA_HW_GSI_CH_NOT_EMPTY_FAILURE";
+ break;
+ default:
+ str = "INVALID ipa_hw_errors type";
+ }
+
+ return str;
+}
+
+static void ipa3_log_evt_hdlr(void)
+{
+ int i;
+
+ if (!ipa3_ctx->uc_ctx.uc_event_top_ofst) {
+ ipa3_ctx->uc_ctx.uc_event_top_ofst =
+ ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
+ if (ipa3_ctx->uc_ctx.uc_event_top_ofst +
+ sizeof(struct IpaHwEventLogInfoData_t) >=
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+ ipa3_ctx->smem_sz) {
+ IPAERR("uc_top 0x%x outside SRAM\n",
+ ipa3_ctx->uc_ctx.uc_event_top_ofst);
+ goto bad_uc_top_ofst;
+ }
+
+ ipa3_ctx->uc_ctx.uc_event_top_mmio = ioremap(
+ ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->uc_ctx.uc_event_top_ofst,
+ sizeof(struct IpaHwEventLogInfoData_t));
+ if (!ipa3_ctx->uc_ctx.uc_event_top_mmio) {
+ IPAERR("fail to ioremap uc top\n");
+ goto bad_uc_top_ofst;
+ }
+
+ for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+ if (ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr)
+ ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr
+ (ipa3_ctx->uc_ctx.uc_event_top_mmio);
+ }
+ } else {
+
+ if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams !=
+ ipa3_ctx->uc_ctx.uc_event_top_ofst) {
+ IPAERR("uc top ofst changed new=%u cur=%u\n",
+ ipa3_ctx->uc_ctx.uc_sram_mmio->
+ eventParams,
+ ipa3_ctx->uc_ctx.uc_event_top_ofst);
+ }
+ }
+
+ return;
+
+bad_uc_top_ofst:
+ ipa3_ctx->uc_ctx.uc_event_top_ofst = 0;
+}
+
+/**
+ * ipa3_uc_state_check() - Check the status of the uC interface
+ *
+ * Return value: 0 if the uC is loaded, interface is initialized
+ * and there was no recent failure in one of the commands.
+ * A negative value is returned otherwise.
+ */
+int ipa3_uc_state_check(void)
+{
+ if (!ipa3_ctx->uc_ctx.uc_inited) {
+ IPAERR("uC interface not initialized\n");
+ return -EFAULT;
+ }
+
+ if (!ipa3_ctx->uc_ctx.uc_loaded) {
+ IPAERR("uC is not loaded\n");
+ return -EFAULT;
+ }
+
+ if (ipa3_ctx->uc_ctx.uc_failed) {
+ IPAERR("uC has failed its last command\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa3_uc_loaded_check() - Check whether the uC has been loaded
+ *
+ * Return value: 1 if the uC is loaded, 0 otherwise
+ */
+int ipa3_uc_loaded_check(void)
+{
+ return ipa3_ctx->uc_ctx.uc_loaded;
+}
+EXPORT_SYMBOL(ipa3_uc_loaded_check);
+
+static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
+ void *private_data,
+ void *interrupt_data)
+{
+ union IpaHwErrorEventData_t evt;
+ u8 feature;
+
+ WARN_ON(private_data != ipa3_ctx);
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ IPADBG("uC evt opcode=%u\n",
+ ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+ feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+ if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+ IPAERR("Invalid feature %u for event %u\n",
+ feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return;
+ }
+ /* Feature specific handling */
+ if (ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr)
+ ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr
+ (ipa3_ctx->uc_ctx.uc_sram_mmio);
+
+ /* General handling */
+ if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_ERROR) {
+ evt.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
+ IPAERR("uC Error, evt errorType = %s\n",
+ ipa_hw_error_str(evt.params.errorType));
+ ipa3_ctx->uc_ctx.uc_failed = true;
+ ipa3_ctx->uc_ctx.uc_error_type = evt.params.errorType;
+ ipa3_ctx->uc_ctx.uc_error_timestamp =
+ ipahal_read_reg(IPA_TAG_TIMER);
+ BUG();
+ } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_LOG_INFO) {
+ IPADBG("uC evt log info ofst=0x%x\n",
+ ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
+ ipa3_log_evt_hdlr();
+ } else {
+ IPADBG("unsupported uC evt opcode=%u\n",
+ ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+int ipa3_uc_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ int result = 0;
+ struct ipa_active_client_logging_info log_info;
+
+ IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
+
+ result = ipa3_uc_state_check();
+ if (result)
+ goto fail;
+
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+ if (ipa3_inc_client_enable_clks_no_block(&log_info))
+ goto fail;
+
+ ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp =
+ IPA_CPU_2_HW_CMD_ERR_FATAL;
+ ipa3_ctx->uc_ctx.pending_cmd = ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp;
+ /* ensure write to shared memory is done before triggering uc */
+ wmb();
+
+ if (ipa3_ctx->apply_rg10_wa)
+ ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+ IPA_CPU_2_HW_CMD_MBOX_m,
+ IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
+ else
+ ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
+
+ /* give uc enough time to save state */
+ udelay(IPA_PKT_FLUSH_TO_US);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPADBG("err_fatal issued\n");
+
+fail:
+ return NOTIFY_DONE;
+}
+
+static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
+ void *private_data,
+ void *interrupt_data)
+{
+ union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+ u8 feature;
+ int res;
+ int i;
+
+ WARN_ON(private_data != ipa3_ctx);
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPADBG("uC rsp opcode=%u\n",
+ ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+ feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+ if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Invalid feature %u for response %u\n",
+			feature, ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return;
+ }
+
+ /* Feature specific handling */
+ if (ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr) {
+ res = ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr(
+ ipa3_ctx->uc_ctx.uc_sram_mmio,
+ &ipa3_ctx->uc_ctx.uc_status);
+ if (res == 0) {
+ IPADBG("feature %d specific response handler\n",
+ feature);
+ complete_all(&ipa3_ctx->uc_ctx.uc_completion);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return;
+ }
+ }
+
+ /* General handling */
+ if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+ IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) {
+ ipa3_ctx->uc_ctx.uc_loaded = true;
+
+ IPADBG("IPA uC loaded\n");
+ /*
+ * The proxy vote is held until uC is loaded to ensure that
+ * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received.
+ */
+ ipa3_proxy_clk_unvote();
+
+ for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+ if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
+ ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
+ }
+ } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+ IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+ uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams;
+ IPADBG("uC cmd response opcode=%u status=%u\n",
+ uc_rsp.params.originalCmdOp,
+ uc_rsp.params.status);
+ if (uc_rsp.params.originalCmdOp ==
+ ipa3_ctx->uc_ctx.pending_cmd) {
+ ipa3_ctx->uc_ctx.uc_status = uc_rsp.params.status;
+ complete_all(&ipa3_ctx->uc_ctx.uc_completion);
+ } else {
+ IPAERR("Expected cmd=%u rcvd cmd=%u\n",
+ ipa3_ctx->uc_ctx.pending_cmd,
+ uc_rsp.params.originalCmdOp);
+ }
+ } else {
+ IPAERR("Unsupported uC rsp opcode = %u\n",
+ ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
+ u32 expected_status, bool polling_mode, unsigned long timeout_jiffies)
+{
+ int index;
+ union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+ unsigned long flags;
+ int retries = 0;
+
+send_cmd_lock:
+ IPA3_UC_LOCK(flags);
+
+ if (ipa3_uc_state_check()) {
+ IPADBG("uC send command aborted\n");
+ IPA3_UC_UNLOCK(flags);
+ return -EBADF;
+ }
+send_cmd:
+ if (ipa3_ctx->apply_rg10_wa) {
+ if (!polling_mode)
+ IPADBG("Overriding mode to polling mode\n");
+ polling_mode = true;
+ } else {
+ init_completion(&ipa3_ctx->uc_ctx.uc_completion);
+ }
+
+ ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd_lo;
+ ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams_hi = cmd_hi;
+ ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode;
+ ipa3_ctx->uc_ctx.pending_cmd = opcode;
+ ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp = 0;
+ ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams = 0;
+
+ ipa3_ctx->uc_ctx.uc_status = 0;
+
+ /* ensure write to shared memory is done before triggering uc */
+ wmb();
+
+ if (ipa3_ctx->apply_rg10_wa)
+ ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+ IPA_CPU_2_HW_CMD_MBOX_m,
+ IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
+ else
+ ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
+
+ if (polling_mode) {
+ for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) {
+ if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+ IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+ uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->
+ responseParams;
+ if (uc_rsp.params.originalCmdOp ==
+ ipa3_ctx->uc_ctx.pending_cmd) {
+ ipa3_ctx->uc_ctx.uc_status =
+ uc_rsp.params.status;
+ break;
+ }
+ }
+ if (ipa3_ctx->apply_rg10_wa)
+ udelay(IPA_UC_POLL_SLEEP_USEC);
+ else
+ usleep_range(IPA_UC_POLL_SLEEP_USEC,
+ IPA_UC_POLL_SLEEP_USEC);
+ }
+
+ if (index == IPA_UC_POLL_MAX_RETRY) {
+ IPAERR("uC max polling retries reached\n");
+ if (ipa3_ctx->uc_ctx.uc_failed) {
+				IPAERR("uC reported an error, errorType = %s\n",
+ ipa_hw_error_str(ipa3_ctx->
+ uc_ctx.uc_error_type));
+ }
+ IPA3_UC_UNLOCK(flags);
+ BUG();
+ return -EFAULT;
+ }
+ } else {
+ if (wait_for_completion_timeout(&ipa3_ctx->uc_ctx.uc_completion,
+ timeout_jiffies) == 0) {
+ IPAERR("uC timed out\n");
+ if (ipa3_ctx->uc_ctx.uc_failed) {
+				IPAERR("uC reported an error, errorType = %s\n",
+ ipa_hw_error_str(ipa3_ctx->
+ uc_ctx.uc_error_type));
+ }
+ IPA3_UC_UNLOCK(flags);
+ BUG();
+ return -EFAULT;
+ }
+ }
+
+ if (ipa3_ctx->uc_ctx.uc_status != expected_status) {
+ if (ipa3_ctx->uc_ctx.uc_status ==
+ IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE) {
+ retries++;
+ if (retries == IPA_GSI_CHANNEL_STOP_MAX_RETRY) {
+ IPAERR("Failed after %d tries\n", retries);
+ IPA3_UC_UNLOCK(flags);
+ BUG();
+ return -EFAULT;
+ }
+ IPA3_UC_UNLOCK(flags);
+ ipa3_inject_dma_task_for_gsi();
+ /* sleep for short period to flush IPA */
+ usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+ IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+ goto send_cmd_lock;
+ }
+
+ if (ipa3_ctx->uc_ctx.uc_status ==
+ IPA_HW_GSI_CH_NOT_EMPTY_FAILURE) {
+ retries++;
+ if (retries >= IPA_GSI_CHANNEL_EMPTY_MAX_RETRY) {
+ IPAERR("Failed after %d tries\n", retries);
+ IPA3_UC_UNLOCK(flags);
+ return -EFAULT;
+ }
+ if (ipa3_ctx->apply_rg10_wa)
+ udelay(
+ IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC / 2 +
+ IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC / 2);
+ else
+ usleep_range(
+ IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC,
+ IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC);
+ goto send_cmd;
+ }
+
+		IPAERR("Received status %u, Expected status %u\n",
+ ipa3_ctx->uc_ctx.uc_status, expected_status);
+ IPA3_UC_UNLOCK(flags);
+ return -EFAULT;
+ }
+
+ IPA3_UC_UNLOCK(flags);
+
+ IPADBG("uC cmd %u send succeeded\n", opcode);
+
+ return 0;
+}
+
+/**
+ * ipa3_uc_interface_init() - Initialize the interface with the uC
+ *
+ * Return value: 0 on success, negative value otherwise
+ */
+int ipa3_uc_interface_init(void)
+{
+ int result;
+ unsigned long phys_addr;
+
+ if (ipa3_ctx->uc_ctx.uc_inited) {
+ IPADBG("uC interface already initialized\n");
+ return 0;
+ }
+
+ mutex_init(&ipa3_ctx->uc_ctx.uc_lock);
+ spin_lock_init(&ipa3_ctx->uc_ctx.uc_spinlock);
+
+ phys_addr = ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0);
+ ipa3_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr,
+ IPA_RAM_UC_SMEM_SIZE);
+ if (!ipa3_ctx->uc_ctx.uc_sram_mmio) {
+ IPAERR("Fail to ioremap IPA uC SRAM\n");
+ result = -ENOMEM;
+ goto remap_fail;
+ }
+
+ if (!ipa3_ctx->apply_rg10_wa) {
+ result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
+ ipa3_uc_event_handler, true,
+ ipa3_ctx);
+ if (result) {
+ IPAERR("Fail to register for UC_IRQ0 rsp interrupt\n");
+ result = -EFAULT;
+ goto irq_fail0;
+ }
+
+ result = ipa3_add_interrupt_handler(IPA_UC_IRQ_1,
+ ipa3_uc_response_hdlr, true,
+ ipa3_ctx);
+ if (result) {
+ IPAERR("fail to register for UC_IRQ1 rsp interrupt\n");
+ result = -EFAULT;
+ goto irq_fail1;
+ }
+ }
+
+ ipa3_ctx->uc_ctx.uc_inited = true;
+
+ IPADBG("IPA uC interface is initialized\n");
+ return 0;
+
+irq_fail1:
+ ipa3_remove_interrupt_handler(IPA_UC_IRQ_0);
+irq_fail0:
+ iounmap(ipa3_ctx->uc_ctx.uc_sram_mmio);
+remap_fail:
+ return result;
+}
+
+/**
+ * ipa3_uc_load_notify() - Notification about uC loading
+ *
+ * This function should be called when the IPA uC interface layer cannot
+ * determine uC loading by itself and instead waits for an external
+ * notification. An example is the resource group 10 limitation, where the
+ * IPA driver does not get uC interrupts.
+ * The function performs the actions that were skipped at init because the
+ * uC was not loaded yet.
+ */
+void ipa3_uc_load_notify(void)
+{
+ int i;
+ int result;
+
+ if (!ipa3_ctx->apply_rg10_wa)
+ return;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipa3_ctx->uc_ctx.uc_loaded = true;
+ IPADBG("IPA uC loaded\n");
+
+ ipa3_proxy_clk_unvote();
+
+ ipa3_init_interrupts();
+
+ result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
+ ipa3_uc_event_handler, true,
+ ipa3_ctx);
+ if (result)
+ IPAERR("Fail to register for UC_IRQ0 rsp interrupt.\n");
+
+ for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+ if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
+ ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
+ }
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+EXPORT_SYMBOL(ipa3_uc_load_notify);
+
+/**
+ * ipa3_uc_send_cmd() - Send a command to the uC
+ *
+ * Note1: This function sends a command with a 32-bit parameter and does not
+ * use the higher 32 bits of the command parameter (they are set to zero).
+ *
+ * Note2: In case the operation times out (no response from the uC) or
+ * the maximal number of polling retries has been reached, the logic
+ * considers it an invalid state of the uC/IPA and
+ * issues a kernel panic.
+ *
+ * Returns: 0 on success.
+ * -EINVAL in case of invalid input.
+ * -EBADF in case the uC interface is not initialized
+ * or the uC has failed previously.
+ * -EFAULT in case the received status doesn't match
+ * the expected one.
+ */
+int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+ bool polling_mode, unsigned long timeout_jiffies)
+{
+ return ipa3_uc_send_cmd_64b_param(cmd, 0, opcode,
+ expected_status, polling_mode, timeout_jiffies);
+}
+
+/**
+ * ipa3_uc_register_handlers() - Registers event, response and log event
+ *                               handlers for a specific feature. Please note
+ * that currently only one handler can be
+ * registered per feature.
+ *
+ * Return value: None
+ */
+void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
+ struct ipa3_uc_hdlrs *hdlrs)
+{
+ unsigned long flags;
+
+ if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+ IPAERR("Feature %u is invalid, not registering hdlrs\n",
+ feature);
+ return;
+ }
+
+ IPA3_UC_LOCK(flags);
+ ipa3_uc_hdlrs[feature] = *hdlrs;
+ IPA3_UC_UNLOCK(flags);
+
+ IPADBG("uC handlers registered for feature %u\n", feature);
+}
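+
+/*
+ * Usage sketch (illustrative only): a feature module registering its
+ * callbacks. The feature ID and wdi_* function names are hypothetical;
+ * the struct fields match those dispatched in this file.
+ *
+ *	static struct ipa3_uc_hdlrs wdi_hdlrs = {
+ *		.ipa_uc_event_hdlr = ipa3_wdi_evt_hdlr,
+ *		.ipa3_uc_response_hdlr = ipa3_wdi_rsp_hdlr,
+ *		.ipa_uc_loaded_hdlr = ipa3_wdi_loaded_hdlr,
+ *	};
+ *
+ *	ipa3_uc_register_handlers(IPA_HW_FEATURE_WDI, &wdi_hdlrs);
+ */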
+
+/**
+ * ipa3_uc_reset_pipe() - reset a BAM pipe using the uC interface
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * The function uses the uC interface in order to issue a BAM
+ * PIPE reset request. The uC makes sure there's no traffic in
+ * the TX command queue before issuing the reset.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_reset_pipe(enum ipa_client_type ipa_client)
+{
+ union IpaHwResetPipeCmdData_t cmd;
+ int ep_idx;
+ int ret;
+
+ ep_idx = ipa3_get_ep_mapping(ipa_client);
+ if (ep_idx == -1) {
+ IPAERR("Invalid IPA client\n");
+ return 0;
+ }
+
+ /*
+ * If the uC interface has not been initialized yet,
+ * continue with the sequence without resetting the
+ * pipe.
+ */
+ if (ipa3_uc_state_check()) {
+ IPADBG("uC interface will not be used to reset %s pipe %d\n",
+ IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
+ ep_idx);
+ return 0;
+ }
+
+ /*
+ * IPA consumer = 0, IPA producer = 1.
+ * IPA driver concept of PROD/CONS is the opposite of the
+ * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER,
+ * and vice-versa.
+ */
+ cmd.params.direction = (u8)(IPA_CLIENT_IS_PROD(ipa_client) ? 0 : 1);
+ cmd.params.pipeNum = (u8)ep_idx;
+
+ IPADBG("uC pipe reset on IPA %s pipe %d\n",
+ IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx);
+
+ ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0,
+ false, 10*HZ);
+
+ return ret;
+}
+
+int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client)
+{
+ struct ipa_gsi_ep_config *gsi_ep_info;
+ union IpaHwChkChEmptyCmdData_t cmd;
+ int ret;
+
+ gsi_ep_info = ipa3_get_gsi_ep_info(ipa3_get_ep_mapping(ipa_client));
+ if (!gsi_ep_info) {
+ IPAERR("Invalid IPA ep index\n");
+ return 0;
+ }
+
+ if (ipa3_uc_state_check()) {
+ IPADBG("uC cannot be used to validate ch emptiness clnt=%d\n"
+ , ipa_client);
+ return 0;
+ }
+
+ cmd.params.ee_n = gsi_ep_info->ee;
+ cmd.params.vir_ch_id = gsi_ep_info->ipa_gsi_chan_num;
+
+ IPADBG("uC emptiness check for IPA GSI Channel %d\n",
+ gsi_ep_info->ipa_gsi_chan_num);
+
+ ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_GSI_CH_EMPTY, 0,
+ false, 10*HZ);
+
+ return ret;
+}
+
+/**
+ * ipa3_uc_notify_clk_state() - notify the uC of clock enable / disable
+ * @enabled: true if clocks are enabled
+ *
+ * The function uses the uC interface to notify the uC before IPA clocks
+ * are disabled, to make sure the uC is not in the middle of an operation.
+ * It is also used after clocks are enabled to notify the uC to resume
+ * processing.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_notify_clk_state(bool enabled)
+{
+ u32 opcode;
+
+ /*
+ * If the uC interface has not been initialized yet,
+ * don't notify the uC on the enable/disable
+ */
+ if (ipa3_uc_state_check()) {
+		IPADBG("uC will not be notified on clock state\n");
+ return 0;
+ }
+
+ IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE");
+
+ opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE :
+ IPA_CPU_2_HW_CMD_CLK_GATE;
+
+ return ipa3_uc_send_cmd(0, opcode, 0, true, 0);
+}
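+
+/*
+ * Editor's usage sketch (assumed call order, not from the original
+ * patch): the clock voting logic is expected to bracket gating with
+ *
+ *	ipa3_uc_notify_clk_state(false);   before IPA clocks are gated
+ *	... gate IPA clocks ...
+ *	... ungate IPA clocks ...
+ *	ipa3_uc_notify_clk_state(true);    after IPA clocks are re-enabled
+ *
+ * The command is sent in polling mode (polling_mode=true, timeout 0),
+ * presumably because an interrupt-driven response cannot be relied on
+ * across the clock transition.
+ */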
+
+/**
+ * ipa3_uc_update_hw_flags() - send uC the HW flags to be used
+ * @flags: This field is expected to be used as bitmask for enum ipa3_hw_flags
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_update_hw_flags(u32 flags)
+{
+ union IpaHwUpdateFlagsCmdData_t cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.newFlags = flags;
+ return ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0,
+ false, HZ);
+}
+
+/**
+ * ipa3_uc_rg10_write_reg() - write to a register, possibly via the uC
+ *
+ * If the RG10 limitation workaround is enabled, the register write is
+ * proxied by the uC due to a H/W limitation.
+ * This function should be called for RG10 registers only.
+ *
+ * @Parameters: same as ipahal_write_reg_n()
+ *
+ */
+void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val)
+{
+ int ret;
+ u32 paddr;
+
+ if (!ipa3_ctx->apply_rg10_wa)
+ return ipahal_write_reg_n(reg, n, val);
+
+ /* calculate register physical address */
+ paddr = ipa3_ctx->ipa_wrapper_base + ipa3_ctx->ctrl->ipa_reg_base_ofst;
+ paddr += ipahal_get_reg_n_ofst(reg, n);
+
+ IPADBG("Sending uC cmd to reg write: addr=0x%x val=0x%x\n",
+ paddr, val);
+ ret = ipa3_uc_send_cmd_64b_param(paddr, val,
+ IPA_CPU_2_HW_CMD_REG_WRITE, 0, true, 0);
+ if (ret) {
+ IPAERR("failed to send cmd to uC for reg write\n");
+ BUG();
+ }
+}
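+
+/*
+ * Editor's illustration (hypothetical register name): callers treat this
+ * as a drop-in for ipahal_write_reg_n(), e.g.
+ *
+ *	ipa3_uc_rg10_write_reg(IPA_SOME_RG10_REG_n, pipe_idx, val);
+ *
+ * When apply_rg10_wa is clear this is a plain ipahal_write_reg_n();
+ * otherwise the register physical address and value are handed to the
+ * uC via IPA_CPU_2_HW_CMD_REG_WRITE.
+ */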
+
+/**
+ * ipa3_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+ int res;
+ struct ipa_mem_buffer mem;
+ struct IpaHwMemCopyData_t *cmd;
+
+ IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len);
+	mem.size = sizeof(*cmd);
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ return -ENOMEM;
+ }
+ cmd = (struct IpaHwMemCopyData_t *)mem.base;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->destination_addr = dest;
+ cmd->dest_buffer_size = len;
+ cmd->source_addr = src;
+ cmd->source_buffer_size = len;
+ res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0,
+ true, 10 * HZ);
+ if (res) {
+ IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+ goto free_coherent;
+ }
+
+ res = 0;
+free_coherent:
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return res;
+}
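+
+/*
+ * Editor's usage sketch (hypothetical buffers, not from the original
+ * patch): both addresses must be physical addresses reachable by the
+ * uC, e.g. from DMA-coherent allocations:
+ *
+ *	src = dma_alloc_coherent(ipa3_ctx->pdev, len, &src_pa, GFP_KERNEL);
+ *	dst = dma_alloc_coherent(ipa3_ctx->pdev, len, &dst_pa, GFP_KERNEL);
+ *	memcpy(src, payload, len);
+ *	ret = ipa3_uc_memcpy(dst_pa, src_pa, len);
+ */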
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
new file mode 100644
index 0000000..7949d91
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
@@ -0,0 +1,962 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/* MHI uC interface definitions */
+#define IPA_HW_INTERFACE_MHI_VERSION 0x0004
+
+#define IPA_HW_MAX_NUMBER_OF_CHANNELS 2
+#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS 2
+#define IPA_HW_MAX_CHANNEL_HANDLE (IPA_HW_MAX_NUMBER_OF_CHANNELS-1)
+
+/**
+ * Values that represent the MHI commands from CPU to IPA HW.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing.
+ *	Once the operation is completed, HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize a specific channel to be
+ *	ready to serve MHI transfers. Once initialization is completed, HW
+ *	shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
+ *	indicating IPA_HW_MHI_CHANNEL_STATE_ENABLE.
+ * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupt data.
+ *	Once the operation is completed, HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change a specific channel's
+ *	processing state following a host request. Once the operation is
+ *	completed, HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE.
+ * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL/UL
+ *	synchronization.
+ * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing.
+ */
+enum ipa_cpu_2_hw_mhi_commands {
+ IPA_CPU_2_HW_CMD_MHI_INIT
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+ IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+ IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+ IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3),
+ IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+ IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5)
+};
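+
+/*
+ * Editor's note: with the FEATURE_ENUM_VAL() packing (3-bit feature ID
+ * above a 5-bit opcode, defined in ipa_uc_offload_i.h later in this
+ * patch) and IPA_HW_FEATURE_MHI = 0x1, these commands occupy
+ * (0x1 << 5) | opcode, i.e. 0x20 (MHI_INIT) through 0x25
+ * (MHI_STOP_EVENT_UPDATE).
+ */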
+
+/**
+ * Values that represent MHI related HW responses to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to
+ * IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or
+ * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands.
+ */
+enum ipa_hw_2_cpu_mhi_responses {
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+};
+
+/**
+ * Values that represent MHI related HW events to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specifying that the device
+ *	detected an error in an element from the transfer ring associated
+ *	with the channel
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specifying that a
+ *	BAM interrupt was asserted while the MHI engine is suspended
+ */
+enum ipa_hw_2_cpu_mhi_events {
+ IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+ IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+};
+
+/**
+ * Channel error types.
+ * @IPA_HW_CHANNEL_ERROR_NONE: No error is present.
+ * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected
+ */
+enum ipa_hw_channel_errors {
+ IPA_HW_CHANNEL_ERROR_NONE,
+ IPA_HW_CHANNEL_INVALID_RE_ERROR
+};
+
+/**
+ * MHI error types.
+ * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space
+ * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array
+ * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array
+ * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on
+ * secondary event ring
+ * @IPA_HW_LINK_ERROR: Link error
+ */
+enum ipa_hw_mhi_errors {
+ IPA_HW_INVALID_MMIO_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+ IPA_HW_INVALID_CHANNEL_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+ IPA_HW_INVALID_EVENT_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+ IPA_HW_NO_ED_IN_RING_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+ IPA_HW_LINK_ERROR
+ = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5),
+};
+
+/**
+ * Structure referring to the common and MHI section of 128B shared memory
+ * located in offset zero of SW Partition in IPA SRAM.
+ * The shared memory is used for communication between IPA HW and CPU.
+ * @common: common section in IPA SRAM
+ * @interfaceVersionMhi: The MHI interface version as reported by HW
+ * @mhiState: Overall MHI state
+ * @reserved_2B: reserved
+ * @mhiCnl0State: State of MHI channel 0.
+ * The state carries information regarding the error type.
+ * See IPA_HW_MHI_CHANNEL_STATES.
+ * @mhiCnl1State: State of MHI channel 1.
+ * @mhiCnl2State: State of MHI channel 2.
+ * @mhiCnl3State: State of MHI channel 3.
+ * @mhiCnl4State: State of MHI channel 4.
+ * @mhiCnl5State: State of MHI channel 5.
+ * @mhiCnl6State: State of MHI channel 6.
+ * @mhiCnl7State: State of MHI channel 7.
+ * @reserved_37_34: reserved
+ * @reserved_3B_38: reserved
+ * @reserved_3F_3C: reserved
+ */
+struct IpaHwSharedMemMhiMapping_t {
+ struct IpaHwSharedMemCommonMapping_t common;
+ u16 interfaceVersionMhi;
+ u8 mhiState;
+ u8 reserved_2B;
+ u8 mhiCnl0State;
+ u8 mhiCnl1State;
+ u8 mhiCnl2State;
+ u8 mhiCnl3State;
+ u8 mhiCnl4State;
+ u8 mhiCnl5State;
+ u8 mhiCnl6State;
+ u8 mhiCnl7State;
+ u32 reserved_37_34;
+ u32 reserved_3B_38;
+ u32 reserved_3F_3C;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command.
+ * Parameters are sent as a pointer and thus should reside at an address
+ * accessible to HW.
+ * @msiAddress: The MSI base (in device space) used for asserting the interrupt
+ * (MSI) associated with the event ring
+ * @mmioBaseAddress: The address (in device space) of the MMIO structure in
+ *	host space
+ * @deviceMhiCtrlBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI control data structures are allocated by
+ *	the host, including channel context array, event context array,
+ *	and rings. This value is used for host/device address translation.
+ * @deviceMhiDataBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI data buffers are allocated by the host.
+ *	This value is used for host/device address translation.
+ * @firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel
+ * @firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this
+ * event ring.
+ */
+struct IpaHwMhiInitCmdData_t {
+ u32 msiAddress;
+ u32 mmioBaseAddress;
+ u32 deviceMhiCtrlBaseAddress;
+ u32 deviceMhiDataBaseAddress;
+ u32 firstChannelIndex;
+ u32 firstEventRingIndex;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+ * command. Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is
+ * used as an index in channel context array structures.
+ * @bamPipeId: The BAM pipe number for pipe dedicated for this channel
+ * @channelDirection: The direction of the channel as defined in the channel
+ * type field (CHTYPE) in the channel context data structure.
+ * @reserved: reserved.
+ */
+union IpaHwMhiInitChannelCmdData_t {
+ struct IpaHwMhiInitChannelCmdParams_t {
+ u32 channelHandle:8;
+ u32 contexArrayIndex:8;
+ u32 bamPipeId:6;
+ u32 channelDirection:2;
+ u32 reserved:8;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command.
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ * the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ * the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwMhiMsiCmdData_t {
+ u32 msiAddress_low;
+ u32 msiAddress_hi;
+ u32 msiMask;
+ u32 msiData;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @requestedState: The requested channel state as was indicated from Host.
+ * Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @LPTransitionRejected: Indication that low power state transition was
+ * rejected
+ * @reserved: reserved
+ */
+union IpaHwMhiChangeChannelStateCmdData_t {
+ struct IpaHwMhiChangeChannelStateCmdParams_t {
+ u32 requestedState:8;
+ u32 channelHandle:8;
+ u32 LPTransitionRejected:8;
+ u32 reserved:8;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiStopEventUpdateData_t {
+ struct IpaHwMhiStopEventUpdateDataParams_t {
+ u32 channelHandle:8;
+ u32 reserved:24;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response.
+ * Parameters are sent as 32b immediate parameters.
+ * @state: The new channel state. In case state is not as requested this is
+ * error indication for the last command
+ * @channelHandle: The channel identifier
+ * @additonalParams: For stop: the number of pending bam descriptors currently
+ * queued
+*/
+union IpaHwMhiChangeChannelStateResponseData_t {
+ struct IpaHwMhiChangeChannelStateResponseParams_t {
+ u32 state:8;
+ u32 channelHandle:8;
+ u32 additonalParams:16;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event.
+ * Parameters are sent as 32b immediate parameters.
+ * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelErrorEventData_t {
+ struct IpaHwMhiChannelErrorEventParams_t {
+ u32 errorType:8;
+ u32 channelHandle:8;
+ u32 reserved:16;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelWakeupEventData_t {
+ struct IpaHwMhiChannelWakeupEventParams_t {
+ u32 channelHandle:8;
+ u32 reserved:24;
+ } params;
+ u32 raw32b;
+};
+
+/**
+ * Structure holding the MHI Common statistics
+ * @numULDLSync: Number of times UL activity was triggered due to DL activity
+ * @numULTimerExpired: Number of times the UL Accm Timer expired
+ * @numChEvCtxWpRead: Number of times the channel event context WP was read
+ */
+struct IpaHwStatsMhiCmnInfoData_t {
+ u32 numULDLSync;
+ u32 numULTimerExpired;
+ u32 numChEvCtxWpRead;
+ u32 reserved;
+};
+
+/**
+ * Structure holding the MHI Channel statistics
+ * @doorbellInt: The number of doorbell interrupts
+ * @reProccesed: The number of ring elements processed
+ * @bamFifoFull: Number of times Bam Fifo got full
+ * @bamFifoEmpty: Number of times Bam Fifo got empty
+ * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75%
+ * @bamFifoUsageLow: Number of times Bam fifo usage went below 25%
+ * @bamInt: Number of BAM Interrupts
+ * @ringFull: Number of times Transfer Ring got full
+ * @ringEmpty: Number of times Transfer Ring got empty
+ * @ringUsageHigh: Number of times Transfer Ring usage went above 75%
+ * @ringUsageLow: Number of times Transfer Ring usage went below 25%
+ * @delayedMsi: Number of times device triggered MSI to host after
+ * Interrupt Moderation Timer expiry
+ * @immediateMsi: Number of times device triggered MSI to host immediately
+ * @thresholdMsi: Number of times device triggered MSI due to max pending
+ * events threshold reached
+ * @numSuspend: Number of times channel was suspended
+ * @numResume: Number of times channel was resumed
+ * @num_OOB: Number of times we indicated that we are OOB
+ * @num_OOB_timer_expiry: Number of times we indicated that we are OOB
+ * after timer expiry
+ * @num_OOB_moderation_timer_start: Number of times we started timer after
+ * sending OOB and hitting OOB again before we processed threshold
+ * number of packets
+ * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode
+ */
+struct IpaHwStatsMhiCnlInfoData_t {
+ u32 doorbellInt;
+ u32 reProccesed;
+ u32 bamFifoFull;
+ u32 bamFifoEmpty;
+ u32 bamFifoUsageHigh;
+ u32 bamFifoUsageLow;
+ u32 bamInt;
+ u32 ringFull;
+ u32 ringEmpty;
+ u32 ringUsageHigh;
+ u32 ringUsageLow;
+ u32 delayedMsi;
+ u32 immediateMsi;
+ u32 thresholdMsi;
+ u32 numSuspend;
+ u32 numResume;
+ u32 num_OOB;
+ u32 num_OOB_timer_expiry;
+ u32 num_OOB_moderation_timer_start;
+ u32 num_db_mode_evt;
+};
+
+/**
+ * Structure holding the MHI statistics
+ * @mhiCmnStats: Stats pertaining to MHI
+ * @mhiCnlStats: Stats pertaining to each channel
+ */
+struct IpaHwStatsMhiInfoData_t {
+ struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats;
+ struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[
+ IPA_HW_MAX_NUMBER_OF_CHANNELS];
+};
+
+/**
+ * Structure holding the MHI Common Config info
+ * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled
+ * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is
+ * enabled
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+struct IpaHwConfigMhiCmnInfoData_t {
+ u8 isDlUlSyncEnabled;
+ u8 UlAccmVal;
+ u8 ulMsiEventThreshold;
+ u8 dlMsiEventThreshold;
+};
+
+/**
+ * Structure holding the parameters for MSI info data
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ * the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ * the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwConfigMhiMsiInfoData_t {
+ u32 msiAddress_low;
+ u32 msiAddress_hi;
+ u32 msiMask;
+ u32 msiData;
+};
+
+/**
+ * Structure holding the MHI Channel Config info
+ * @transferRingSize: The Transfer Ring size in terms of Ring Elements
+ * @transferRingIndex: The Transfer Ring channel number as defined by host
+ * @eventRingIndex: The Event Ring Index associated with this Transfer Ring
+ * @bamPipeIndex: The BAM Pipe associated with this channel
+ * @isOutChannel: Indication for the direction of channel
+ * @reserved_0: Reserved byte for maintaining 4byte alignment
+ * @reserved_1: Reserved byte for maintaining 4byte alignment
+ */
+struct IpaHwConfigMhiCnlInfoData_t {
+ u16 transferRingSize;
+ u8 transferRingIndex;
+ u8 eventRingIndex;
+ u8 bamPipeIndex;
+ u8 isOutChannel;
+ u8 reserved_0;
+ u8 reserved_1;
+};
+
+/**
+ * Structure holding the MHI Event Config info
+ * @msiVec: msi vector to invoke MSI interrupt
+ * @intmodtValue: Interrupt moderation timer (in milliseconds)
+ * @eventRingSize: The Event Ring size in terms of Ring Elements
+ * @eventRingIndex: The Event Ring number as defined by host
+ * @reserved_0: Reserved byte for maintaining 4byte alignment
+ * @reserved_1: Reserved byte for maintaining 4byte alignment
+ * @reserved_2: Reserved byte for maintaining 4byte alignment
+ */
+struct IpaHwConfigMhiEventInfoData_t {
+ u32 msiVec;
+ u16 intmodtValue;
+ u16 eventRingSize;
+ u8 eventRingIndex;
+ u8 reserved_0;
+ u8 reserved_1;
+ u8 reserved_2;
+};
+
+/**
+ * Structure holding the MHI Config info
+ * @mhiCmnCfg: Common Config pertaining to MHI
+ * @mhiMsiCfg: Config pertaining to MSI config
+ * @mhiCnlCfg: Config pertaining to each channel
+ * @mhiEvtCfg: Config pertaining to each event Ring
+ */
+struct IpaHwConfigMhiInfoData_t {
+ struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg;
+ struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg;
+ struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[
+ IPA_HW_MAX_NUMBER_OF_CHANNELS];
+ struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[
+ IPA_HW_MAX_NUMBER_OF_EVENTRINGS];
+};
+
+struct ipa3_uc_mhi_ctx {
+ u8 expected_responseOp;
+ u32 expected_responseParams;
+ void (*ready_cb)(void);
+ void (*wakeup_request_cb)(void);
+ u32 mhi_uc_stats_ofst;
+ struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio;
+};
+
+#define PRINT_COMMON_STATS(x) \
+ (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+ #x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x))
+
+#define PRINT_CHANNEL_STATS(ch, x) \
+ (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+ #x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x))
+
+struct ipa3_uc_mhi_ctx *ipa3_uc_mhi_ctx;
+
+static int ipa3_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t
+ *uc_sram_mmio, u32 *uc_status)
+{
+ IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp);
+ if (uc_sram_mmio->responseOp == ipa3_uc_mhi_ctx->expected_responseOp &&
+ uc_sram_mmio->responseParams ==
+ ipa3_uc_mhi_ctx->expected_responseParams) {
+ *uc_status = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
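+
+/*
+ * Editor's note on the handshake above: each command path in this file
+ * first records the response it expects the uC to echo back
+ * (expected_responseOp/expected_responseParams) and only then sends the
+ * command, e.g. (from ipa3_uc_mhi_reset_channel below):
+ *
+ *	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+ *	uc_rsp.params.channelHandle = channelHandle;
+ *	ipa3_uc_mhi_ctx->expected_responseOp =
+ *		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+ *	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+ *
+ * Any other response is treated as a failure (-EINVAL).
+ */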
+
+static void ipa3_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
+ *uc_sram_mmio)
+{
+	if (uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
+ union IpaHwMhiChannelErrorEventData_t evt;
+
+ IPAERR("Channel error\n");
+ evt.raw32b = uc_sram_mmio->eventParams;
+ IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
+ evt.params.errorType, evt.params.channelHandle,
+ evt.params.reserved);
+	} else if (uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
+ union IpaHwMhiChannelWakeupEventData_t evt;
+
+ IPADBG("WakeUp channel request\n");
+ evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("channelHandle=%d reserved=%d\n",
+ evt.params.channelHandle, evt.params.reserved);
+ ipa3_uc_mhi_ctx->wakeup_request_cb();
+ }
+}
+
+static void ipa3_uc_mhi_event_log_info_hdlr(
+ struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) {
+ IPAERR("MHI feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].
+ params.size != sizeof(struct IpaHwStatsMhiInfoData_t)) {
+ IPAERR("mhi stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct IpaHwStatsMhiInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_MHI].params.size);
+ return;
+ }
+
+ ipa3_uc_mhi_ctx->mhi_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_MHI].params.offset;
+	IPADBG("MHI stats ofst=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
+ if (ipa3_uc_mhi_ctx->mhi_uc_stats_ofst +
+ sizeof(struct IpaHwStatsMhiInfoData_t) >=
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+ ipa3_ctx->smem_sz) {
+ IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
+ ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
+ return;
+ }
+
+ ipa3_uc_mhi_ctx->mhi_uc_stats_mmio =
+ ioremap(ipa3_ctx->ipa_wrapper_base +
+ ipa3_uc_mhi_ctx->mhi_uc_stats_ofst,
+ sizeof(struct IpaHwStatsMhiInfoData_t));
+ if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc mhi stats\n");
+ return;
+ }
+}
+
+int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
+{
+ struct ipa3_uc_hdlrs hdlrs;
+
+ if (ipa3_uc_mhi_ctx) {
+ IPAERR("Already initialized\n");
+ return -EFAULT;
+ }
+
+ ipa3_uc_mhi_ctx = kzalloc(sizeof(*ipa3_uc_mhi_ctx), GFP_KERNEL);
+ if (!ipa3_uc_mhi_ctx) {
+ IPAERR("no mem\n");
+ return -ENOMEM;
+ }
+
+ ipa3_uc_mhi_ctx->ready_cb = ready_cb;
+ ipa3_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;
+
+ memset(&hdlrs, 0, sizeof(hdlrs));
+ hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_mhi_ctx->ready_cb;
+ hdlrs.ipa3_uc_response_hdlr = ipa3_uc_mhi_response_hdlr;
+ hdlrs.ipa_uc_event_hdlr = ipa3_uc_mhi_event_hdlr;
+ hdlrs.ipa_uc_event_log_info_hdlr = ipa3_uc_mhi_event_log_info_hdlr;
+ ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
+
+ IPADBG("Done\n");
+ return 0;
+}
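+
+/*
+ * Editor's usage sketch (hypothetical callbacks): the MHI client
+ * registers once, before the uC engine is used:
+ *
+ *	static void my_mhi_ready(void) { ... }
+ *	static void my_mhi_wakeup(void) { ... }
+ *
+ *	ret = ipa3_uc_mhi_init(my_mhi_ready, my_mhi_wakeup);
+ *
+ * ready_cb fires once the uC is loaded; wakeup_request_cb fires on the
+ * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event handled above.
+ */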
+
+void ipa3_uc_mhi_cleanup(void)
+{
+ struct ipa3_uc_hdlrs null_hdlrs = { 0 };
+
+ IPADBG("Enter\n");
+
+ if (!ipa3_uc_mhi_ctx) {
+ IPAERR("ipa3_uc_mhi_ctx is not initialized\n");
+ return;
+ }
+ ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs);
+ kfree(ipa3_uc_mhi_ctx);
+ ipa3_uc_mhi_ctx = NULL;
+
+ IPADBG("Done\n");
+}
+
+int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+ u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+ u32 first_evt_idx)
+{
+ int res;
+ struct ipa_mem_buffer mem;
+ struct IpaHwMhiInitCmdData_t *init_cmd_data;
+ struct IpaHwMhiMsiCmdData_t *msi_cmd;
+
+ if (!ipa3_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ res = ipa3_uc_update_hw_flags(0);
+ if (res) {
+ IPAERR("ipa3_uc_update_hw_flags failed %d\n", res);
+ goto disable_clks;
+ }
+
+ mem.size = sizeof(*init_cmd_data);
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ res = -ENOMEM;
+ goto disable_clks;
+ }
+ memset(mem.base, 0, mem.size);
+ init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
+ init_cmd_data->msiAddress = msi->addr_low;
+ init_cmd_data->mmioBaseAddress = mmio_addr;
+ init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
+ init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
+ init_cmd_data->firstChannelIndex = first_ch_idx;
+ init_cmd_data->firstEventRingIndex = first_evt_idx;
+ res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0,
+ false, HZ);
+ if (res) {
+ IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+ mem.phys_base);
+ goto disable_clks;
+ }
+
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+ mem.size = sizeof(*msi_cmd);
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+ res = -ENOMEM;
+ goto disable_clks;
+ }
+
+ msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
+ msi_cmd->msiAddress_hi = msi->addr_hi;
+ msi_cmd->msiAddress_low = msi->addr_low;
+ msi_cmd->msiData = msi->data;
+ msi_cmd->msiMask = msi->mask;
+ res = ipa3_uc_send_cmd((u32)mem.phys_base,
+ IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+ mem.phys_base);
+ goto disable_clks;
+ }
+
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+ int contexArrayIndex, int channelDirection)
+{
+ int res;
+ union IpaHwMhiInitChannelCmdData_t init_cmd;
+ union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+
+ if (!ipa3_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (ipa_ep_idx < 0 || ipa_ep_idx >= ipa3_ctx->ipa_num_pipes) {
+ IPAERR("Invalid ipa_ep_idx.\n");
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&uc_rsp, 0, sizeof(uc_rsp));
+ uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ uc_rsp.params.channelHandle = channelHandle;
+ ipa3_uc_mhi_ctx->expected_responseOp =
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+ ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+ memset(&init_cmd, 0, sizeof(init_cmd));
+ init_cmd.params.channelHandle = channelHandle;
+ init_cmd.params.contexArrayIndex = contexArrayIndex;
+ init_cmd.params.bamPipeId = ipa_ep_idx;
+ init_cmd.params.channelDirection = channelDirection;
+
+ res = ipa3_uc_send_cmd(init_cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa3_uc_mhi_reset_channel(int channelHandle)
+{
+ union IpaHwMhiChangeChannelStateCmdData_t cmd;
+ union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+ int res;
+
+ if (!ipa3_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&uc_rsp, 0, sizeof(uc_rsp));
+ uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+ uc_rsp.params.channelHandle = channelHandle;
+ ipa3_uc_mhi_ctx->expected_responseOp =
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+ ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+ cmd.params.channelHandle = channelHandle;
+ res = ipa3_uc_send_cmd(cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa3_uc_mhi_suspend_channel(int channelHandle)
+{
+ union IpaHwMhiChangeChannelStateCmdData_t cmd;
+ union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+ int res;
+
+ if (!ipa3_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&uc_rsp, 0, sizeof(uc_rsp));
+ uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+ uc_rsp.params.channelHandle = channelHandle;
+ ipa3_uc_mhi_ctx->expected_responseOp =
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+ ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+ cmd.params.channelHandle = channelHandle;
+ res = ipa3_uc_send_cmd(cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
+{
+ union IpaHwMhiChangeChannelStateCmdData_t cmd;
+ union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+ int res;
+
+ if (!ipa3_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&uc_rsp, 0, sizeof(uc_rsp));
+ uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ uc_rsp.params.channelHandle = channelHandle;
+ ipa3_uc_mhi_ctx->expected_responseOp =
+ IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+ ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN;
+ cmd.params.channelHandle = channelHandle;
+ cmd.params.LPTransitionRejected = LPTransitionRejected;
+ res = ipa3_uc_send_cmd(cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa3_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+ union IpaHwMhiStopEventUpdateData_t cmd;
+ int res;
+
+ if (!ipa3_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.params.channelHandle = channelHandle;
+
+ ipa3_uc_mhi_ctx->expected_responseOp =
+ IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE;
+ ipa3_uc_mhi_ctx->expected_responseParams = cmd.raw32b;
+
+ res = ipa3_uc_send_cmd(cmd.raw32b,
+ IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
+{
+ int res;
+
+ if (!ipa3_uc_mhi_ctx) {
+ IPAERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
+ cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal);
+ IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
+ cmd->params.ulMsiEventThreshold,
+ cmd->params.dlMsiEventThreshold);
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ res = ipa3_uc_send_cmd(cmd->raw32b,
+ IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
+ if (res) {
+ IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+ goto disable_clks;
+ }
+
+ res = 0;
+disable_clks:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return res;
+}
+
+int ipa3_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+ int nBytes = 0;
+ int i;
+
+	if (!ipa3_uc_mhi_ctx || !ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
+ IPAERR("MHI uc stats is not valid\n");
+ return 0;
+ }
+
+ nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+ "Common Stats:\n");
+ PRINT_COMMON_STATS(numULDLSync);
+ PRINT_COMMON_STATS(numULTimerExpired);
+ PRINT_COMMON_STATS(numChEvCtxWpRead);
+
+ for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) {
+ nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+ "Channel %d Stats:\n", i);
+ PRINT_CHANNEL_STATS(i, doorbellInt);
+ PRINT_CHANNEL_STATS(i, reProccesed);
+ PRINT_CHANNEL_STATS(i, bamFifoFull);
+ PRINT_CHANNEL_STATS(i, bamFifoEmpty);
+ PRINT_CHANNEL_STATS(i, bamFifoUsageHigh);
+ PRINT_CHANNEL_STATS(i, bamFifoUsageLow);
+ PRINT_CHANNEL_STATS(i, bamInt);
+ PRINT_CHANNEL_STATS(i, ringFull);
+ PRINT_CHANNEL_STATS(i, ringEmpty);
+ PRINT_CHANNEL_STATS(i, ringUsageHigh);
+ PRINT_CHANNEL_STATS(i, ringUsageLow);
+ PRINT_CHANNEL_STATS(i, delayedMsi);
+ PRINT_CHANNEL_STATS(i, immediateMsi);
+ PRINT_CHANNEL_STATS(i, thresholdMsi);
+ PRINT_CHANNEL_STATS(i, numSuspend);
+ PRINT_CHANNEL_STATS(i, numResume);
+ PRINT_CHANNEL_STATS(i, num_OOB);
+ PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry);
+ PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start);
+ PRINT_CHANNEL_STATS(i, num_db_mode_evt);
+ }
+
+ return nBytes;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
new file mode 100644
index 0000000..7b89184
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+static void ipa3_uc_ntn_event_handler(struct IpaHwSharedMemCommonMapping_t
+ *uc_sram_mmio)
+{
+ union Ipa3HwNTNErrorEventData_t ntn_evt;
+
+ if (uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+ ntn_evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+ ntn_evt.params.ntn_error_type,
+ ntn_evt.params.ipa_pipe_number,
+ ntn_evt.params.ntn_ch_err_type);
+ }
+}
+
+static void ipa3_uc_ntn_event_log_info_handler(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+ IPAERR("NTN feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+ params.size != sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
+ IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct Ipa3HwStatsNTNInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.size);
+ return;
+ }
+
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+	IPADBG("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+ sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+ ipa3_ctx->smem_sz) {
+ IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ return;
+ }
+
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+ ioremap(ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+ sizeof(struct Ipa3HwStatsNTNInfoData_t));
+ if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc ntn stats\n");
+ return;
+ }
+}
+
+/**
+ * ipa3_get_ntn_stats() - Query NTN statistics from the uC
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ if (unlikely(!ipa3_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("bad params stats=%p ntn_stats=%p\n",
+ stats,
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ TX_STATS(num_pkts_processed);
+ TX_STATS(tail_ptr_val);
+ TX_STATS(num_db_fired);
+ TX_STATS(tx_comp_ring_stats.ringFull);
+ TX_STATS(tx_comp_ring_stats.ringEmpty);
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+ TX_STATS(tx_comp_ring_stats.ringUsageLow);
+ TX_STATS(tx_comp_ring_stats.RingUtilCount);
+ TX_STATS(bam_stats.bamFifoFull);
+ TX_STATS(bam_stats.bamFifoEmpty);
+ TX_STATS(bam_stats.bamFifoUsageHigh);
+ TX_STATS(bam_stats.bamFifoUsageLow);
+ TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(num_db);
+ TX_STATS(num_unexpected_db);
+ TX_STATS(num_bam_int_handled);
+ TX_STATS(num_bam_int_in_non_running_state);
+ TX_STATS(num_qmb_int_handled);
+ TX_STATS(num_bam_int_handled_while_wait_for_bam);
+ TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+ RX_STATS(max_outstanding_pkts);
+ RX_STATS(num_pkts_processed);
+ RX_STATS(rx_ring_rp_value);
+ RX_STATS(rx_ind_ring_stats.ringFull);
+ RX_STATS(rx_ind_ring_stats.ringEmpty);
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+ RX_STATS(rx_ind_ring_stats.ringUsageLow);
+ RX_STATS(rx_ind_ring_stats.RingUtilCount);
+ RX_STATS(bam_stats.bamFifoFull);
+ RX_STATS(bam_stats.bamFifoEmpty);
+ RX_STATS(bam_stats.bamFifoUsageHigh);
+ RX_STATS(bam_stats.bamFifoUsageLow);
+ RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(num_bam_int_handled);
+ RX_STATS(num_db);
+ RX_STATS(num_unexpected_db);
+ RX_STATS(num_pkts_in_dis_uninit_state);
+ RX_STATS(num_bam_int_handled_while_not_in_bam);
+ RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+int ipa3_ntn_init(void)
+{
+ struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
+
+ uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler;
+ uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+ ipa3_uc_ntn_event_log_info_handler;
+
+ ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+ return 0;
+}
+
+static int ipa3_uc_send_ntn_setup_pipe_cmd(
+ struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+ int ipa_ep_idx;
+ int result = 0;
+ struct ipa_mem_buffer cmd;
+ struct Ipa3HwNtnSetUpCmdData_t *Ntn_params;
+ struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+ if (ntn_info == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to get ep idx.\n");
+ return -EFAULT;
+ }
+
+ IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+ IPADBG("ring_base_pa = 0x%pa\n",
+ &ntn_info->ring_base_pa);
+ IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+ IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+ IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+ IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+ IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+ Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+ Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+ Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+ Ntn_params->num_buffers = ntn_info->num_buffers;
+ Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+ Ntn_params->data_buff_size = ntn_info->data_buff_size;
+ Ntn_params->ipa_pipe_number = ipa_ep_idx;
+ Ntn_params->dir = dir;
+
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result)
+ result = -EFAULT;
+
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ return result;
+}
+
+/**
+ * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ struct ipa3_ep_context *ep_ul;
+ struct ipa3_ep_context *ep_dl;
+ int ipa_ep_idx_ul;
+ int ipa_ep_idx_dl;
+ int result = 0;
+
+ if (in == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+ if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+ IPAERR("fail to alloc EP.\n");
+ return -EFAULT;
+ }
+
+ ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->valid || ep_dl->valid) {
+ IPAERR("EP already allocated.\n");
+ return -EFAULT;
+ }
+
+ memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys));
+ memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys));
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ /* setup ul ep cfg */
+ ep_ul->valid = 1;
+ ep_ul->client = in->ul.client;
+ result = ipa3_enable_data_path(ipa_ep_idx_ul);
+ if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_ul);
+		result = -EFAULT;
+		goto fail;
+ }
+ ep_ul->client_notify = notify;
+ ep_ul->priv = priv;
+
+ memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+ ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+ ep_ul->cfg.hdr.hdr_len = hdr_len;
+ ep_ul->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+ IPAERR("fail to setup ul pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+ IPAERR("fail to send cmd to uc for ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
+ outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+ ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->ul.client,
+ ipa_ep_idx_ul);
+
+ /* setup dl ep cfg */
+ ep_dl->valid = 1;
+ ep_dl->client = in->dl.client;
+ result = ipa3_enable_data_path(ipa_ep_idx_dl);
+ if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_dl);
+ result = -EFAULT;
+ goto fail;
+ }
+
+	memset(&ep_dl->cfg, 0, sizeof(ep_dl->cfg));
+ ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+ ep_dl->cfg.hdr.hdr_len = hdr_len;
+ ep_dl->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+ IPAERR("fail to setup dl pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+ IPAERR("fail to send cmd to uc for dl pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+ ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->dl.client,
+ ipa_ep_idx_dl);
+
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
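+
+/*
+ * Editor's usage sketch (hypothetical values, not from the original
+ * patch): the caller fills ipa_ntn_conn_in_params with the UL/DL ring
+ * and buffer-pool physical addresses plus the IPA clients, then
+ * connects both pipes in one call:
+ *
+ *	in.ul.client = <NTN UL producer client>;
+ *	in.ul.ring_base_pa = ...;
+ *	in.dl.client = <NTN DL consumer client>;
+ *	...
+ *	ret = ipa3_setup_uc_ntn_pipes(&in, my_notify, my_priv, hdr_len,
+ *		&out);
+ *
+ * On success, out.ul_uc_db_pa/out.dl_uc_db_pa hold the uC doorbell
+ * physical addresses (IPA_UC_NTN_DB_PA_RX/TX above).
+ */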
+
+/**
+ * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ struct ipa_mem_buffer cmd;
+ struct ipa3_ep_context *ep_ul, *ep_dl;
+ struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+ union Ipa3HwNtnCommonChCmdData_t *tear;
+ int result = 0;
+
+ IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+ IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+ ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+ ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+ IPAERR("channel bad state: ul %d dl %d\n",
+ ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+ return -EFAULT;
+ }
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ /* teardown the UL pipe */
+ cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+ tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa3_disable_data_path(ipa_ep_idx_ul);
+ ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
+ memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
+ IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+ /* teardown the DL pipe */
+ tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+		IPAERR("fail to tear down dl pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa3_disable_data_path(ipa_ep_idx_dl);
+ memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+ IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
new file mode 100644
index 0000000..946fc7e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -0,0 +1,580 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ * @brief Enum value determined based on the feature it
+ * corresponds to
+ * +----------------+----------------+
+ * | 3 bits | 5 bits |
+ * +----------------+----------------+
+ * | HW_FEATURE | OPCODE |
+ * +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) (((feature) << 5) | (opcode))
+#define EXTRACT_UC_FEATURE(value) ((value) >> 5)
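+
+/*
+ * Editor's worked example: FEATURE_ENUM_VAL() packs the 3-bit feature ID
+ * above the 5-bit opcode, so
+ *	FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3) = (0x1 << 5) | 3 = 0x23
+ * and EXTRACT_UC_FEATURE(0x23) = 0x23 >> 5 = 0x1 = IPA_HW_FEATURE_MHI.
+ */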
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa3_hw_features - Values that represent the features supported
+ * in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to offload operation in IPA HW
+*/
+enum ipa3_hw_features {
+ IPA_HW_FEATURE_COMMON = 0x0,
+ IPA_HW_FEATURE_MHI = 0x1,
+ IPA_HW_FEATURE_POWER_COLLAPSE = 0x2,
+ IPA_HW_FEATURE_WDI = 0x3,
+ IPA_HW_FEATURE_ZIP = 0x4,
+ IPA_HW_FEATURE_NTN = 0x5,
+ IPA_HW_FEATURE_OFFLOAD = 0x6,
+ IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
+};
+
+/**
+ * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
+ * device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ */
+enum ipa3_hw_2_cpu_events {
+ IPA_HW_2_CPU_EVENT_NO_OP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_2_CPU_EVENT_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_2_CPU_EVENT_LOG_INFO =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa3_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error is present
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
+ * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
+ */
+enum ipa3_hw_errors {
+ IPA_HW_ERROR_NONE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_INVALID_DOORBELL_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_DMA_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_HW_FATAL_SYSTEM_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+ IPA_HW_INVALID_OPCODE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+ IPA_HW_INVALID_PARAMS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+ IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+ IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+ IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter, lower 32 bits. The parameter field
+ * can hold 32 bits of immediate parameters or point to a structure in system
+ * memory (in such case the address must be accessible to HW)
+ * @cmdParams_hi : CPU->HW command parameter, higher 32 bits.
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter field can hold
+ * 32 bits of immediate parameters or point to a structure in system
+ * memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter field can hold 32
+ * bits of immediate parameters or point to a
+ * structure in system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the
+ * error type.
+ * @warningCounter : The warnings counter. The counter carries information
+ * regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+ u8 cmdOp;
+ u8 reserved_01;
+ u16 reserved_03_02;
+ u32 cmdParams;
+ u32 cmdParams_hi;
+ u8 responseOp;
+ u8 reserved_0D;
+ u16 reserved_0F_0E;
+ u32 responseParams;
+ u8 eventOp;
+ u8 reserved_15;
+ u16 reserved_17_16;
+ u32 eventParams;
+ u32 firstErrorAddress;
+ u8 hwState;
+ u8 warningCounter;
+ u16 reserved_23_22;
+ u16 interfaceVersionCommon;
+ u16 reserved_27_26;
+} __packed;
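+
+/*
+ * Editor's layout note: the reserved_* field names above encode byte
+ * offsets, giving this 0x28-byte map of the shared memory head:
+ *	0x00 cmdOp	0x04 cmdParams		0x08 cmdParams_hi
+ *	0x0C responseOp	0x10 responseParams
+ *	0x14 eventOp	0x18 eventParams	0x1C firstErrorAddress
+ *	0x20 hwState	0x21 warningCounter	0x24 interfaceVersionCommon
+ */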
+
+/**
+ * union Ipa3HwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union Ipa3HwFeatureInfoData_t {
+ struct IpaHwFeatureInfoParams_t {
+ u32 offset:16;
+ u32 size:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+ struct IpaHwErrorEventParams_t {
+ u32 errorType:8;
+ u32 reserved:24;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * struct Ipa3HwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @featureInfo : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct Ipa3HwEventInfoData_t {
+ u32 baseAddrOffset;
+ union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+ u32 featureMask;
+ u32 circBuffBaseAddrOffset;
+ struct Ipa3HwEventInfoData_t statsInfo;
+ struct Ipa3HwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa3_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa3_uc_ntn_ctx {
+ u32 ntn_uc_stats_ofst;
+ struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa3_hw_2_cpu_ntn_events - Values that represent HW event
+ * to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ * detected an error in NTN
+ *
+ */
+enum ipa3_hw_2_cpu_ntn_events {
+ IPA_HW_2_CPU_EVENT_NTN_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+/**
+ * enum ipa3_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error is present
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa3_hw_ntn_errors {
+ IPA_HW_NTN_ERROR_NONE = 0,
+ IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_states {
+ IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_NTN_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Tx ring
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ * num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY: Rx cache is not empty
+ * @IPA_HW_NTN_CH_ERR_RESERVED: Reserved - Not available for CPU
+ * to use
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_errors {
+ IPA_HW_NTN_CH_ERR_NONE = 0,
+ IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_NTN_TX_FSM_ERROR = 2,
+ IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+ IPA_HW_NTN_RX_FSM_ERROR = 5,
+ IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6,
+ IPA_HW_NTN_CH_ERR_RESERVED = 0xFF
+};
+
+
+/**
+ * struct Ipa3HwNtnSetUpCmdData_t - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ * ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ * Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ * Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of the each data buffer allocated in
+ * DDR
+ */
+struct Ipa3HwNtnSetUpCmdData_t {
+ u32 ring_base_pa;
+ u32 buff_pool_base_pa;
+ u16 ntn_ring_size;
+ u16 num_buffers;
+ u32 ntn_reg_base_ptr_pa;
+ u8 ipa_pipe_number;
+ u8 dir;
+ u16 data_buff_size;
+
+} __packed;
+
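+/*
+ * Illustrative sketch (assumption, not part of the FW interface): since
+ * the layout above is a binary contract with the uC, a compile-time
+ * check can guard against accidental padding or field changes:
+ *
+ *	BUILD_BUG_ON(sizeof(struct Ipa3HwNtnSetUpCmdData_t) != 20);
+ */
+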
+/**
+ * union Ipa3HwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear down command
+ *
+ * @ipa_pipe_number: IPA pipe number. This could be a Tx or an Rx pipe
+ * @reserved: Reserved
+ */
+union Ipa3HwNtnCommonChCmdData_t {
+ struct IpaHwNtnCommonChCmdParams_t {
+ u32 ipa_pipe_number :8;
+ u32 reserved :24;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union Ipa3HwNTNErrorEventData_t - Structure holding the parameters
+ * for the IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are
+ * passed as immediate params in the shared memory
+ *
+ * @ntn_error_type: type of NTN error (ipa3_hw_ntn_errors)
+ * @reserved: Reserved
+ * @ipa_pipe_number: IPA pipe number on which the error has happened.
+ * Applicable only if the error type indicates a channel error
+ * @ntn_ch_err_type: Information about the channel error (if
+ * available)
+ */
+union Ipa3HwNTNErrorEventData_t {
+ struct IpaHwNTNErrorEventParams_t {
+ u32 ntn_error_type :8;
+ u32 reserved :8;
+ u32 ipa_pipe_number :8;
+ u32 ntn_ch_err_type :8;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTN3RxInfoData_t - Structure holding the NTN Rx channel
+ * information
+ *
+ * @max_outstanding_pkts: Number of outstanding packets in the Rx
+ * Ring
+ * @num_pkts_processed: Number of packets processed - cumulative
+ * @rx_ring_rp_value: Read pointer last advertized to the WLAN FW
+ * @rx_ind_ring_stats: Rx indication ring statistics
+ * @bam_stats: BAM statistics
+ * @num_bam_int_handled: Number of BAM interrupts handled by FW
+ * @num_db: Number of times the doorbell was rung
+ * @num_unexpected_db: Number of unexpected doorbells
+ * @num_pkts_in_dis_uninit_state: Number of packets received while the
+ * channel was disabled or uninitialized
+ * @num_bam_int_handled_while_not_in_bam: Number of BAM interrupts
+ * handled by FW while not in BAM state
+ * @num_bam_int_handled_while_in_bam_state: Number of BAM interrupts
+ * handled by FW while in BAM state
+ */
+struct NTN3RxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_bam_int_handled_while_not_in_bam;
+ u32 num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * information. Ensure that this is always word aligned
+ *
+ * @num_pkts_processed: Number of packets processed - cumulative
+ * @tail_ptr_val: Latest value of doorbell written to the copy engine
+ * @num_db_fired: Number of DBs from uC FW to the copy engine
+ * @tx_comp_ring_stats: Tx completion ring statistics
+ * @bam_stats: BAM statistics
+ * @num_db: Number of times the doorbell was rung
+ * @num_unexpected_db: Number of unexpected doorbells
+ * @num_bam_int_handled: Number of BAM interrupts handled by FW
+ * @num_bam_int_in_non_running_state: Number of BAM interrupts
+ * while not in Running state
+ * @num_qmb_int_handled: Number of QMB interrupts handled
+ * @num_bam_int_handled_while_wait_for_bam: Number of times the
+ * Imm Cmd is injected due to fw_desc change
+ * @num_bam_int_handled_while_not_in_bam: Number of BAM interrupts
+ * handled by FW while not in BAM state
+ */
+struct NTNTxInfoData_t {
+ u32 num_pkts_processed;
+ u32 tail_ptr_val;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_running_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+ u32 num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Rx and
+ * Tx channel statistics. Ensure that this is always word aligned
+ * @rx_ch_stats: NTN Rx channel statistics
+ * @tx_ch_stats: NTN Tx channel statistics
+ *
+ */
+struct Ipa3HwStatsNTNInfoData_t {
+ struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+ struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands - Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ * an offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ * an offload protocol's Tx/Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa3_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa3_hw_offload_channel_states {
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF
+};
+
+
+/**
+ * enum ipa3_hw_2_cpu_offload_cmd_resp_status - Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa3_hw_2_cpu_offload_cmd_resp_status {
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+ IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+ IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+ IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+ IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+ IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+ IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - Channel setup command parameters, selected
+ * per offload protocol
+ * @NtnSetupCh_params : NTN channel setup command parameters
+ */
+union IpaHwSetUpCmd {
+ struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - Structure holding the parameters
+ * for the IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP command
+ * @protocol : Offload protocol the setup applies to
+ * @SetupCh_params : Channel setup command parameters
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+ u8 protocol;
+ union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ * @NtnCommonCh_params : NTN tear down command parameters
+ */
+union IpaHwCommonChCmd {
+ union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
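+/**
+ * struct IpaHwOffloadCommonChCmdData_t - Structure holding the
+ * parameters for the IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN command
+ * @protocol : Offload protocol of the channel being torn down
+ * @CommonCh_params : Common channel command parameters
+ */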
+struct IpaHwOffloadCommonChCmdData_t {
+ u8 protocol;
+ union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
new file mode 100644
index 0000000..e1deb58
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -0,0 +1,1815 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+
+#define IPA_HOLB_TMR_DIS 0x0
+
+#define IPA_HW_INTERFACE_WDI_VERSION 0x0001
+#define IPA_HW_WDI_RX_MBOX_START_INDEX 48
+#define IPA_HW_WDI_TX_MBOX_START_INDEX 50
+#define IPA_WDI_RING_ALIGNMENT 8
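+
+/*
+ * Note (worked example): the mailbox indices above are split into an
+ * (m, n) register pair as index/32 and index%32 when programming
+ * IPA_UC_MAILBOX_m_n, so Rx index 48 maps to (1, 16) and Tx index 50
+ * maps to (1, 18).
+ */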
+
+#define IPA_WDI_CONNECTED BIT(0)
+#define IPA_WDI_ENABLED BIT(1)
+#define IPA_WDI_RESUMED BIT(2)
+#define IPA_UC_POLL_SLEEP_USEC 100
+
+#define IPA_WDI_RX_RING_RES 0
+#define IPA_WDI_RX_RING_RP_RES 1
+#define IPA_WDI_RX_COMP_RING_RES 2
+#define IPA_WDI_RX_COMP_RING_WP_RES 3
+#define IPA_WDI_TX_RING_RES 4
+#define IPA_WDI_CE_RING_RES 5
+#define IPA_WDI_CE_DB_RES 6
+#define IPA_WDI_MAX_RES 7
+
+struct ipa_wdi_res {
+ struct ipa_wdi_buffer_info *res;
+ unsigned int nents;
+ bool valid;
+};
+
+static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES];
+
+static void ipa3_uc_wdi_loaded_handler(void);
+
+/**
+ * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to
+ * CPU.
+ * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error
+ * in WDI
+ */
+enum ipa_hw_2_cpu_wdi_events {
+ IPA_HW_2_CPU_EVENT_WDI_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+};
+
+/**
+ * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state
+ * machine.
+ * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but
+ * disabled
+ * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in
+ * suspended state
+ * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in
+ * operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_wdi_channel_states {
+ IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2,
+ IPA_HW_WDI_CHANNEL_STATE_RUNNING = 3,
+ IPA_HW_WDI_CHANNEL_STATE_ERROR = 4,
+ IPA_HW_WDI_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa3_cpu_2_hw_commands - Values that represent the WDI commands from
+ * CPU
+ * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path
+ * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel
+ * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/ Rx Path
+ */
+enum ipa_cpu_2_hw_wdi_commands {
+ IPA_CPU_2_HW_CMD_WDI_TX_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+ IPA_CPU_2_HW_CMD_WDI_RX_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+ IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+ IPA_CPU_2_HW_CMD_WDI_CH_ENABLE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+ IPA_CPU_2_HW_CMD_WDI_CH_DISABLE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+ IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+ IPA_CPU_2_HW_CMD_WDI_CH_RESUME =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+ IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+};
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+ IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+ IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+ IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+ IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+ IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+ IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+ IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+ IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+ IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+ IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+ IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+ IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+ IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+ IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+ IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+ IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
+ * enum ipa_hw_wdi_errors - WDI specific error types.
+ * @IPA_HW_WDI_ERROR_NONE : No error persists
+ * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_wdi_errors {
+ IPA_HW_WDI_ERROR_NONE = 0,
+ IPA_HW_WDI_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_wdi_ch_errors - List of WDI Channel error types. This is present
+ * in the event param.
+ * @IPA_HW_WDI_CH_ERR_NONE : No error persists
+ * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx
+ * Completion ring
+ * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition
+ * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating num RE to bring
+ * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use
+ */
+enum ipa_hw_wdi_ch_errors {
+ IPA_HW_WDI_CH_ERR_NONE = 0,
+ IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_WDI_TX_FSM_ERROR = 2,
+ IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_WDI_CH_ERR_RESERVED = 0xFF
+};
+
+/**
+ * struct IpaHwSharedMemWdiMapping_t - Structure referring to the common and
+ * WDI section of the 128B shared memory located at offset zero of the SW
+ * Partition in IPA SRAM.
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemWdiMapping_t {
+ struct IpaHwSharedMemCommonMapping_t common;
+ u32 reserved_2B_28;
+ u32 reserved_2F_2C;
+ u32 reserved_33_30;
+ u32 reserved_37_34;
+ u32 reserved_3B_38;
+ u32 reserved_3F_3C;
+ u16 interfaceVersionWdi;
+ u16 reserved_43_42;
+ u8 wdi_tx_ch_0_state;
+ u8 wdi_rx_ch_0_state;
+ u16 reserved_47_46;
+} __packed;
+
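+/*
+ * Note: the reserved_* fields above appear to be named after the byte
+ * range they occupy within the 128B area (e.g. reserved_2B_28 spans
+ * offsets 0x28-0x2B), keeping the map self-documenting.
+ */
+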
+/**
+ * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command.
+ * @comp_ring_base_pa : This is the physical address of the base of the Tx
+ * completion ring
+ * @comp_ring_size : This is the size of the Tx completion ring
+ * @reserved_comp_ring : Reserved field for expansion of Completion ring params
+ * @ce_ring_base_pa : This is the physical address of the base of the Copy
+ * Engine Source Ring
+ * @ce_ring_size : Copy Engine Ring size
+ * @reserved_ce_ring : Reserved field for expansion of CE ring params
+ * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the
+ * IPA uC has to write into to trigger the copy engine
+ * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring
+ * and the Tx completion ring has to be at least (num_tx_buffers + 1)
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Tx path
+ * @reserved : Reserved field
+ *
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to the HW
+ */
+struct IpaHwWdiTxSetUpCmdData_t {
+ u32 comp_ring_base_pa;
+ u16 comp_ring_size;
+ u16 reserved_comp_ring;
+ u32 ce_ring_base_pa;
+ u16 ce_ring_size;
+ u16 reserved_ce_ring;
+ u32 ce_ring_doorbell_pa;
+ u16 num_tx_buffers;
+ u8 ipa_pipe_number;
+ u8 reserved;
+} __packed;
+
+struct IpaHwWdi2TxSetUpCmdData_t {
+ u32 comp_ring_base_pa;
+ u32 comp_ring_base_pa_hi;
+ u16 comp_ring_size;
+ u16 reserved_comp_ring;
+ u32 ce_ring_base_pa;
+ u32 ce_ring_base_pa_hi;
+ u16 ce_ring_size;
+ u16 reserved_ce_ring;
+ u32 ce_ring_doorbell_pa;
+ u32 ce_ring_doorbell_pa_hi;
+ u16 num_tx_buffers;
+ u8 ipa_pipe_number;
+ u8 reserved;
+} __packed;
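+
+/*
+ * Illustrative sketch (mirrors ipa3_connect_wdi_pipe() below): the *_hi
+ * fields extend WDI 2.0 addresses beyond 32 bits; a 64-bit IOVA is
+ * split across the pair as:
+ *
+ *	tx_2->comp_ring_base_pa_hi =
+ *	 (u32)((va & 0xFFFFFFFF00000000) >> 32);
+ *	tx_2->comp_ring_base_pa = (u32)(va & 0xFFFFFFFF);
+ */
+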
+/**
+ * struct IpaHwWdiRxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring
+ * (containing Rx buffers)
+ * @rx_ring_size : This is the size of the Rx ring
+ * @rx_ring_rp_pa : This is the physical address of the location through which
+ * IPA uc is expected to communicate about the Read pointer into the Rx Ring
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Rx path
+ *
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to the HW
+ */
+struct IpaHwWdiRxSetUpCmdData_t {
+ u32 rx_ring_base_pa;
+ u32 rx_ring_size;
+ u32 rx_ring_rp_pa;
+ u8 ipa_pipe_number;
+} __packed;
+
+struct IpaHwWdi2RxSetUpCmdData_t {
+ u32 rx_ring_base_pa;
+ u32 rx_ring_base_pa_hi;
+ u32 rx_ring_size;
+ u32 rx_ring_rp_pa;
+ u32 rx_ring_rp_pa_hi;
+ u32 rx_comp_ring_base_pa;
+ u32 rx_comp_ring_base_pa_hi;
+ u32 rx_comp_ring_size;
+ u32 rx_comp_ring_wp_pa;
+ u32 rx_comp_ring_wp_pa_hi;
+ u8 ipa_pipe_number;
+} __packed;
+
+/**
+ * union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
+ * @ipa_pipe_number : The IPA pipe number for which this config is passed
+ * @qmap_id : QMAP ID to be set in the metadata register
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiRxExtCfgCmdData_t {
+ struct IpaHwWdiRxExtCfgCmdParams_t {
+ u32 ipa_pipe_number:8;
+ u32 qmap_id:8;
+ u32 reserved:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiCommonChCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command.
+ * @ipa_pipe_number : The IPA pipe number. This could be Tx or an Rx pipe
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiCommonChCmdData_t {
+ struct IpaHwWdiCommonChCmdParams_t {
+ u32 ipa_pipe_number:8;
+ u32 reserved:24;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR
+ * event.
+ * @wdi_error_type : Type of WDI error (ipa_hw_wdi_errors)
+ * @reserved : Reserved
+ * @ipa_pipe_number : IPA pipe number on which error has happened. Applicable
+ * only if error type indicates channel error
+ * @wdi_ch_err_type : Information about the channel error (if available)
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiErrorEventData_t {
+ struct IpaHwWdiErrorEventParams_t {
+ u32 wdi_error_type:8;
+ u32 reserved:8;
+ u32 ipa_pipe_number:8;
+ u32 wdi_ch_err_type:8;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+static void ipa3_uc_wdi_event_log_info_handler(
+ struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_WDI)) == 0) {
+ IPAERR("WDI feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].
+ params.size != sizeof(struct IpaHwStatsWDIInfoData_t)) {
+ IPAERR("wdi stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct IpaHwStatsWDIInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_WDI].params.size);
+ return;
+ }
+
+ ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_WDI].params.offset;
+ IPAERR("WDI stats ofst=0x%x\n", ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+ if (ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
+ sizeof(struct IpaHwStatsWDIInfoData_t) >=
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+ ipa3_ctx->smem_sz) {
+ IPAERR("uc_wdi_stats 0x%x outside SRAM\n",
+ ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+ return;
+ }
+
+ ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio =
+ ioremap(ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst,
+ sizeof(struct IpaHwStatsWDIInfoData_t));
+ if (!ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc wdi stats\n");
+ return;
+ }
+}
+
+static void ipa3_uc_wdi_event_handler(
+ struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio)
+{
+ union IpaHwWdiErrorEventData_t wdi_evt;
+ struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext;
+
+ if (uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_WDI_ERROR) {
+ wdi_evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n",
+ wdi_evt.params.wdi_error_type,
+ wdi_evt.params.ipa_pipe_number,
+ wdi_evt.params.wdi_ch_err_type);
+ wdi_sram_mmio_ext =
+ (struct IpaHwSharedMemWdiMapping_t *)
+ uc_sram_mmio;
+ IPADBG("tx_ch_state=%u rx_ch_state=%u\n",
+ wdi_sram_mmio_ext->wdi_tx_ch_0_state,
+ wdi_sram_mmio_ext->wdi_rx_ch_0_state);
+ }
+}
+
+/**
+ * ipa3_get_wdi_stats() - Query WDI statistics from uc
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats.y = \
+ ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y
+#define RX_STATS(y) stats->rx_ch_stats.y = \
+ ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y
+
+ if (!stats || !ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+ IPAERR("bad parms stats=%p wdi_stats=%p\n",
+ stats,
+ ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio);
+ return -EINVAL;
+ }
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ TX_STATS(num_pkts_processed);
+ TX_STATS(copy_engine_doorbell_value);
+ TX_STATS(num_db_fired);
+ TX_STATS(tx_comp_ring_stats.ringFull);
+ TX_STATS(tx_comp_ring_stats.ringEmpty);
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+ TX_STATS(tx_comp_ring_stats.ringUsageLow);
+ TX_STATS(tx_comp_ring_stats.RingUtilCount);
+ TX_STATS(bam_stats.bamFifoFull);
+ TX_STATS(bam_stats.bamFifoEmpty);
+ TX_STATS(bam_stats.bamFifoUsageHigh);
+ TX_STATS(bam_stats.bamFifoUsageLow);
+ TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(num_db);
+ TX_STATS(num_unexpected_db);
+ TX_STATS(num_bam_int_handled);
+ TX_STATS(num_bam_int_in_non_running_state);
+ TX_STATS(num_qmb_int_handled);
+ TX_STATS(num_bam_int_handled_while_wait_for_bam);
+
+ RX_STATS(max_outstanding_pkts);
+ RX_STATS(num_pkts_processed);
+ RX_STATS(rx_ring_rp_value);
+ RX_STATS(rx_ind_ring_stats.ringFull);
+ RX_STATS(rx_ind_ring_stats.ringEmpty);
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+ RX_STATS(rx_ind_ring_stats.ringUsageLow);
+ RX_STATS(rx_ind_ring_stats.RingUtilCount);
+ RX_STATS(bam_stats.bamFifoFull);
+ RX_STATS(bam_stats.bamFifoEmpty);
+ RX_STATS(bam_stats.bamFifoUsageHigh);
+ RX_STATS(bam_stats.bamFifoUsageLow);
+ RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(num_bam_int_handled);
+ RX_STATS(num_db);
+ RX_STATS(num_unexpected_db);
+ RX_STATS(num_pkts_in_dis_uninit_state);
+ RX_STATS(reserved1);
+ RX_STATS(reserved2);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
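+/*
+ * Usage sketch (illustrative only; ipa3_get_wdi_stats() is the real
+ * API, the caller below is hypothetical):
+ *
+ *	struct IpaHwStatsWDIInfoData_t stats;
+ *
+ *	if (!ipa3_get_wdi_stats(&stats))
+ *		pr_info("wdi tx pkts=%u rx pkts=%u\n",
+ *			stats.tx_ch_stats.num_pkts_processed,
+ *			stats.rx_ch_stats.num_pkts_processed);
+ */
+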
+int ipa3_wdi_init(void)
+{
+ struct ipa3_uc_hdlrs uc_wdi_cbs = { 0 };
+
+ uc_wdi_cbs.ipa_uc_event_hdlr = ipa3_uc_wdi_event_handler;
+ uc_wdi_cbs.ipa_uc_event_log_info_hdlr =
+ ipa3_uc_wdi_event_log_info_handler;
+ uc_wdi_cbs.ipa_uc_loaded_hdlr =
+ ipa3_uc_wdi_loaded_handler;
+
+ ipa3_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs);
+
+ return 0;
+}
+
+static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
+ bool device, unsigned long *iova)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+ unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+ PAGE_SIZE);
+ int ret;
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+
+ ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+ true_len,
+ device ? (prot | IOMMU_DEVICE) : prot);
+ if (ret) {
+ IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+ return -EINVAL;
+ }
+
+ ipa3_ctx->wdi_map_cnt++;
+ cb->next_addr = va + true_len;
+ *iova = va + pa - rounddown(pa, PAGE_SIZE);
+ return 0;
+}
+
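+/*
+ * Note (worked example): the rounding above maps whole pages while
+ * preserving the sub-page offset. With 4K pages, pa=0x12345678 and
+ * len=0x100 map the page at 0x12345000 with true_len=0x1000, and the
+ * returned IOVA carries the 0x678 offset into that page.
+ */
+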
+static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
+ unsigned long *iova)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+ unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ int ret;
+ int i;
+ struct scatterlist *sg;
+ unsigned long start_iova = va;
+ phys_addr_t phys;
+ size_t len;
+ int count = 0;
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+ if (!sgt) {
+ IPAERR("Bad parameters, scatter / gather list is NULL\n");
+ return -EINVAL;
+ }
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ /* directly get sg_tbl PA from wlan-driver */
+ phys = sg->dma_address;
+ len = PAGE_ALIGN(sg->offset + sg->length);
+
+ ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
+ if (ret) {
+ IPAERR("iommu map failed for pa=%pa len=%zu\n",
+ &phys, len);
+ goto bad_mapping;
+ }
+ va += len;
+ ipa3_ctx->wdi_map_cnt++;
+ count++;
+ }
+ cb->next_addr = va;
+ *iova = start_iova;
+
+ return 0;
+
+bad_mapping:
+ for_each_sg(sgt->sgl, sg, count, i)
+ iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
+ sg_dma_len(sg));
+ return -EINVAL;
+}
+
+static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+ int i;
+ int j;
+ int start;
+ int end;
+
+ if (IPA_CLIENT_IS_CONS(client)) {
+ start = IPA_WDI_TX_RING_RES;
+ end = IPA_WDI_CE_DB_RES;
+ } else {
+ start = IPA_WDI_RX_RING_RES;
+ if (ipa3_ctx->ipa_wdi2)
+ end = IPA_WDI_RX_COMP_RING_WP_RES;
+ else
+ end = IPA_WDI_RX_RING_RP_RES;
+ }
+
+ for (i = start; i <= end; i++) {
+ if (wdi_res[i].valid) {
+ for (j = 0; j < wdi_res[i].nents; j++) {
+ iommu_unmap(cb->mapping->domain,
+ wdi_res[i].res[j].iova,
+ wdi_res[i].res[j].size);
+ ipa3_ctx->wdi_map_cnt--;
+ }
+ kfree(wdi_res[i].res);
+ wdi_res[i].valid = false;
+ }
+ }
+
+ if (ipa3_ctx->wdi_map_cnt == 0)
+ cb->next_addr = cb->va_end;
+
+}
+
+static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa,
+ unsigned long iova, size_t len)
+{
+ IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+ &pa, iova, len);
+ wdi_res[res_idx].res = kzalloc(sizeof(struct ipa_wdi_res), GFP_KERNEL);
+ if (!wdi_res[res_idx].res)
+ BUG();
+ wdi_res[res_idx].nents = 1;
+ wdi_res[res_idx].valid = true;
+ wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE);
+ wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE);
+ wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa,
+ PAGE_SIZE), PAGE_SIZE);
+ IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+ &wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova,
+ wdi_res[res_idx].res->size);
+}
+
+static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt,
+ unsigned long iova)
+{
+ int i;
+ struct scatterlist *sg;
+ unsigned long curr_iova = iova;
+
+ if (!sgt) {
+ IPAERR("Bad parameters, scatter / gather list is NULL\n");
+ return;
+ }
+
+ wdi_res[res_idx].res = kcalloc(sgt->nents, sizeof(struct ipa_wdi_res),
+ GFP_KERNEL);
+ if (!wdi_res[res_idx].res)
+ BUG();
+ wdi_res[res_idx].nents = sgt->nents;
+ wdi_res[res_idx].valid = true;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ /* directly get sg_tbl PA from wlan */
+ wdi_res[res_idx].res[i].pa = sg->dma_address;
+ wdi_res[res_idx].res[i].iova = curr_iova;
+ wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset +
+ sg->length);
+ IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+ &wdi_res[res_idx].res[i].pa,
+ wdi_res[res_idx].res[i].iova,
+ wdi_res[res_idx].res[i].size);
+ curr_iova += wdi_res[res_idx].res[i].size;
+ }
+}
+
+static int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en,
+ phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+ unsigned long *iova)
+{
+ /* support for SMMU on WLAN but no SMMU on IPA */
+ if (wlan_smmu_en && ipa3_ctx->smmu_s1_bypass) {
+ IPAERR("Unsupported SMMU pairing\n");
+ return -EINVAL;
+ }
+
+ /* legacy: no SMMUs on either end */
+ if (!wlan_smmu_en && ipa3_ctx->smmu_s1_bypass) {
+ *iova = pa;
+ return 0;
+ }
+
+ /* no SMMU on WLAN but SMMU on IPA */
+ if (!wlan_smmu_en && !ipa3_ctx->smmu_s1_bypass) {
+ if (ipa_create_uc_smmu_mapping_pa(pa, len,
+ (res_idx == IPA_WDI_CE_DB_RES) ? true : false, iova)) {
+ IPAERR("Fail to create mapping res %d\n", res_idx);
+ return -EFAULT;
+ }
+ ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+ return 0;
+ }
+
+ /* SMMU on WLAN and SMMU on IPA */
+ if (wlan_smmu_en && !ipa3_ctx->smmu_s1_bypass) {
+ switch (res_idx) {
+ case IPA_WDI_RX_RING_RP_RES:
+ case IPA_WDI_RX_COMP_RING_WP_RES:
+ case IPA_WDI_CE_DB_RES:
+ if (ipa_create_uc_smmu_mapping_pa(pa, len,
+ (res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+ iova)) {
+ IPAERR("Fail to create mapping res %d\n",
+ res_idx);
+ return -EFAULT;
+ }
+ ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+ break;
+ case IPA_WDI_RX_RING_RES:
+ case IPA_WDI_RX_COMP_RING_RES:
+ case IPA_WDI_TX_RING_RES:
+ case IPA_WDI_CE_RING_RES:
+ if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) {
+ IPAERR("Fail to create mapping res %d\n",
+ res_idx);
+ return -EFAULT;
+ }
+ ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ return 0;
+}
+
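+/*
+ * Summary of the SMMU pairings handled above:
+ *
+ *	WLAN SMMU | IPA S1 bypass | action
+ *	----------+---------------+------------------------------------
+ *	    on    |      on       | rejected (unsupported pairing)
+ *	    off   |      on       | legacy: IOVA == PA, no mapping
+ *	    off   |      off      | map by PA into the uC context bank
+ *	    on    |      off      | map DB/RP/WP by PA, rings by sg_table
+ */
+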
+/**
+ * ipa3_connect_wdi_pipe() - WDI client connect
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out)
+{
+ int ipa_ep_idx;
+ int result = -EFAULT;
+ struct ipa3_ep_context *ep;
+ struct ipa_mem_buffer cmd;
+ struct IpaHwWdiTxSetUpCmdData_t *tx;
+ struct IpaHwWdiRxSetUpCmdData_t *rx;
+ struct IpaHwWdi2TxSetUpCmdData_t *tx_2;
+ struct IpaHwWdi2RxSetUpCmdData_t *rx_2;
+
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ unsigned long va;
+ phys_addr_t pa;
+ u32 len;
+
+ if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) {
+ IPAERR("bad parm. in=%p out=%p\n", in, out);
+ if (in)
+ IPAERR("client = %d\n", in->sys.client);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+ if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT ||
+ in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+ IPAERR("alignment failure on TX\n");
+ return -EINVAL;
+ }
+ } else {
+ if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+ IPAERR("alignment failure on RX\n");
+ return -EINVAL;
+ }
+ }
+
+ result = ipa3_uc_state_check();
+ if (result)
+ return result;
+
+ ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to alloc EP.\n");
+ goto fail;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (ep->valid) {
+ IPAERR("EP already allocated.\n");
+ goto fail;
+ }
+
+ memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+ IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+ IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
+ if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+ if (ipa3_ctx->ipa_wdi2)
+ cmd.size = sizeof(*tx_2);
+ else
+ cmd.size = sizeof(*tx);
+ IPADBG("comp_ring_base_pa=0x%pa\n",
+ &in->u.dl.comp_ring_base_pa);
+ IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
+ IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa);
+ IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
+ IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+ &in->u.dl.ce_door_bell_pa);
+ IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+ } else {
+ if (ipa3_ctx->ipa_wdi2)
+ cmd.size = sizeof(*rx_2);
+ else
+ cmd.size = sizeof(*rx);
+ IPADBG("rx_ring_base_pa=0x%pa\n",
+ &in->u.ul.rdy_ring_base_pa);
+ IPADBG("rx_ring_size=%d\n",
+ in->u.ul.rdy_ring_size);
+ IPADBG("rx_ring_rp_pa=0x%pa\n",
+ &in->u.ul.rdy_ring_rp_pa);
+ IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+ &in->u.ul.rdy_comp_ring_base_pa);
+ IPADBG("rx_comp_ring_size=%d\n",
+ in->u.ul.rdy_comp_ring_size);
+ IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+ &in->u.ul.rdy_comp_ring_wp_pa);
+ ipa3_ctx->uc_ctx.rdy_ring_base_pa =
+ in->u.ul.rdy_ring_base_pa;
+ ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
+ in->u.ul.rdy_ring_rp_pa;
+ ipa3_ctx->uc_ctx.rdy_ring_size =
+ in->u.ul.rdy_ring_size;
+ ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa =
+ in->u.ul.rdy_comp_ring_base_pa;
+ ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+ in->u.ul.rdy_comp_ring_wp_pa;
+ ipa3_ctx->uc_ctx.rdy_comp_ring_size =
+ in->u.ul.rdy_comp_ring_size;
+
+ /* check that the VAs passed by the client are non-NULL */
+ if (ipa3_ctx->ipa_wdi2) {
+ if (in->smmu_enabled) {
+ if (!in->u.ul_smmu.rdy_ring_rp_va ||
+ !in->u.ul_smmu.rdy_comp_ring_wp_va)
+ goto dma_alloc_fail;
+ } else {
+ if (!in->u.ul.rdy_ring_rp_va ||
+ !in->u.ul.rdy_comp_ring_wp_va)
+ goto dma_alloc_fail;
+ }
+ IPADBG("rdy_ring_rp value =%d\n",
+ in->smmu_enabled ?
+ *in->u.ul_smmu.rdy_ring_rp_va :
+ *in->u.ul.rdy_ring_rp_va);
+ IPADBG("rx_comp_ring_wp value=%d\n",
+ in->smmu_enabled ?
+ *in->u.ul_smmu.rdy_comp_ring_wp_va :
+ *in->u.ul.rdy_comp_ring_wp_va);
+ ipa3_ctx->uc_ctx.rdy_ring_rp_va =
+ in->smmu_enabled ?
+ in->u.ul_smmu.rdy_ring_rp_va :
+ in->u.ul.rdy_ring_rp_va;
+ ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va =
+ in->smmu_enabled ?
+ in->u.ul_smmu.rdy_comp_ring_wp_va :
+ in->u.ul.rdy_comp_ring_wp_va;
+ }
+ }
+
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ result = -ENOMEM;
+ goto dma_alloc_fail;
+ }
+
+ if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+ if (ipa3_ctx->ipa_wdi2) {
+ tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base;
+
+ len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+ in->u.dl.comp_ring_size;
+ IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
+ in->u.dl_smmu.comp_ring_size,
+ in->u.dl.comp_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+ in->smmu_enabled,
+ in->u.dl.comp_ring_base_pa,
+ &in->u.dl_smmu.comp_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping TX ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx_2->comp_ring_base_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+ tx_2->comp_ring_size = len;
+ IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n",
+ tx_2->comp_ring_base_pa_hi,
+ tx_2->comp_ring_base_pa);
+
+ len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+ in->u.dl.ce_ring_size;
+ IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
+ in->u.dl_smmu.ce_ring_size,
+ in->u.dl.ce_ring_size);
+ /* WA: wlan passed ce_ring sg_table PA directly */
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+ in->smmu_enabled,
+ in->u.dl.ce_ring_base_pa,
+ &in->u.dl_smmu.ce_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping CE ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx_2->ce_ring_base_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+ tx_2->ce_ring_size = len;
+ IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n",
+ tx_2->ce_ring_base_pa_hi,
+ tx_2->ce_ring_base_pa);
+
+ pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+ in->u.dl.ce_door_bell_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ true,
+ &va)) {
+ IPAERR("fail to create uc mapping CE DB.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx_2->ce_ring_doorbell_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF);
+ IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n",
+ tx_2->ce_ring_doorbell_pa_hi,
+ tx_2->ce_ring_doorbell_pa);
+
+ tx_2->num_tx_buffers = in->smmu_enabled ?
+ in->u.dl_smmu.num_tx_buffers :
+ in->u.dl.num_tx_buffers;
+ tx_2->ipa_pipe_number = ipa_ep_idx;
+ } else {
+ tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+
+ len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+ in->u.dl.comp_ring_size;
+ IPADBG("TX ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
+ in->u.dl_smmu.comp_ring_size,
+ in->u.dl.comp_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+ in->smmu_enabled,
+ in->u.dl.comp_ring_base_pa,
+ &in->u.dl_smmu.comp_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping TX ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx->comp_ring_base_pa = va;
+ tx->comp_ring_size = len;
+ len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+ in->u.dl.ce_ring_size;
+ IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
+ in->u.dl_smmu.ce_ring_size,
+ in->u.dl.ce_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+ in->smmu_enabled,
+ in->u.dl.ce_ring_base_pa,
+ &in->u.dl_smmu.ce_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping CE ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx->ce_ring_base_pa = va;
+ tx->ce_ring_size = len;
+ pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+ in->u.dl.ce_door_bell_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ true,
+ &va)) {
+ IPAERR("fail to create uc mapping CE DB.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx->ce_ring_doorbell_pa = va;
+ tx->num_tx_buffers = in->u.dl.num_tx_buffers;
+ tx->ipa_pipe_number = ipa_ep_idx;
+ }
+ out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+ ipahal_get_reg_base() +
+ ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+ IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+ } else {
+ if (ipa3_ctx->ipa_wdi2) {
+ rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base;
+
+ len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+ in->u.ul.rdy_ring_size;
+ IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
+ in->u.ul_smmu.rdy_ring_size,
+ in->u.ul.rdy_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+ in->smmu_enabled,
+ in->u.ul.rdy_ring_base_pa,
+ &in->u.ul_smmu.rdy_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc RX_2 ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx_2->rx_ring_base_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+ rx_2->rx_ring_size = len;
+ IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n",
+ rx_2->rx_ring_base_pa_hi,
+ rx_2->rx_ring_base_pa);
+
+ pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+ in->u.ul.rdy_ring_rp_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ false,
+ &va)) {
+ IPAERR("fail to create uc RX_2 rng RP\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx_2->rx_ring_rp_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF);
+ IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n",
+ rx_2->rx_ring_rp_pa_hi,
+ rx_2->rx_ring_rp_pa);
+ len = in->smmu_enabled ?
+ in->u.ul_smmu.rdy_comp_ring_size :
+ in->u.ul.rdy_comp_ring_size;
+ IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n",
+ in->smmu_enabled,
+ in->u.ul_smmu.rdy_comp_ring_size,
+ in->u.ul.rdy_comp_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_COMP_RING_RES,
+ in->smmu_enabled,
+ in->u.ul.rdy_comp_ring_base_pa,
+ &in->u.ul_smmu.rdy_comp_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc RX_2 comp_ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx_2->rx_comp_ring_base_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+ rx_2->rx_comp_ring_size = len;
+ IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n",
+ rx_2->rx_comp_ring_base_pa_hi,
+ rx_2->rx_comp_ring_base_pa);
+
+ pa = in->smmu_enabled ?
+ in->u.ul_smmu.rdy_comp_ring_wp_pa :
+ in->u.ul.rdy_comp_ring_wp_pa;
+ if (ipa_create_uc_smmu_mapping(
+ IPA_WDI_RX_COMP_RING_WP_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ false,
+ &va)) {
+ IPAERR("fail to create uc RX_2 comp_rng WP\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx_2->rx_comp_ring_wp_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF);
+ IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n",
+ rx_2->rx_comp_ring_wp_pa_hi,
+ rx_2->rx_comp_ring_wp_pa);
+ rx_2->ipa_pipe_number = ipa_ep_idx;
+ } else {
+ rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
+
+ len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+ in->u.ul.rdy_ring_size;
+ IPADBG("RX ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
+ in->u.ul_smmu.rdy_ring_size,
+ in->u.ul.rdy_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+ in->smmu_enabled,
+ in->u.ul.rdy_ring_base_pa,
+ &in->u.ul_smmu.rdy_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping RX ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx->rx_ring_base_pa = va;
+ rx->rx_ring_size = len;
+
+ pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+ in->u.ul.rdy_ring_rp_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping RX rng RP\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx->rx_ring_rp_pa = va;
+ rx->ipa_pipe_number = ipa_ep_idx;
+ }
+ out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+ ipahal_get_reg_base() +
+ ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+ IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+ }
+
+ ep->valid = 1;
+ ep->client = in->sys.client;
+ ep->keep_ipa_awake = in->sys.keep_ipa_awake;
+ result = ipa3_disable_data_path(ipa_ep_idx);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx);
+ goto uc_timeout;
+ }
+ if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = true;
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+ }
+
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CLIENT_IS_CONS(in->sys.client) ?
+ IPA_CPU_2_HW_CMD_WDI_TX_SET_UP :
+ IPA_CPU_2_HW_CMD_WDI_RX_SET_UP,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ ep->skip_ep_cfg = in->sys.skip_ep_cfg;
+ ep->client_notify = in->sys.notify;
+ ep->priv = in->sys.priv;
+
+ if (!ep->skip_ep_cfg) {
+ if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+ IPADBG("ep configuration successful\n");
+ } else {
+ IPADBG("Skipping endpoint configuration.\n");
+ }
+
+ out->clnt_hdl = ipa_ep_idx;
+
+ if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
+ ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ ep->uc_offload_state |= IPA_WDI_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
+
+ return 0;
+
+ipa_cfg_ep_fail:
+ memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+uc_timeout:
+ ipa_release_uc_smmu_mappings(in->sys.client);
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+dma_alloc_fail:
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+fail:
+ return result;
+}
+
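+/*
+ * Usage sketch (illustrative only): the expected WDI pipe lifecycle,
+ * tracked via ep->uc_offload_state, is
+ *
+ *	ipa3_connect_wdi_pipe()    ->  sets IPA_WDI_CONNECTED
+ *	ipa3_enable_wdi_pipe()     ->  sets IPA_WDI_ENABLED
+ *	ipa3_resume_wdi_pipe()     ->  sets IPA_WDI_RESUMED
+ *	...data transfer...
+ *	ipa3_suspend_wdi_pipe()    ->  clears IPA_WDI_RESUMED
+ *	ipa3_disable_wdi_pipe()    ->  clears IPA_WDI_ENABLED
+ *	ipa3_disconnect_wdi_pipe()
+ */
+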
+/**
+ * ipa3_disconnect_wdi_pipe() - WDI client disconnect
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa3_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t tear;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa3_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+
+ if (!ep->keep_ipa_awake)
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ tear.params.ipa_pipe_number = clnt_hdl;
+
+ result = ipa3_uc_send_cmd(tear.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ ipa3_delete_dflt_flt_rules(clnt_hdl);
+ ipa_release_uc_smmu_mappings(ep->client);
+
+ memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa3_enable_wdi_pipe() - WDI client enable
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_enable_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa3_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t enable;
+ struct ipa_ep_cfg_holb holb_cfg;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa3_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+ enable.params.ipa_pipe_number = clnt_hdl;
+
+ result = ipa3_uc_send_cmd(enable.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.en = IPA_HOLB_TMR_DIS;
+ holb_cfg.tmr_val = 0;
+ result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+ }
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ ep->uc_offload_state |= IPA_WDI_ENABLED;
+ IPADBG("client (ep: %d) enabled\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa3_disable_wdi_pipe() - WDI client disable
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_disable_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa3_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t disable;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ u32 prod_hdl;
+ int i;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa3_uc_state_check();
+ if (result)
+ return result;
+
+ /* checking rdy_ring_rp_pa matches the rdy_comp_ring_wp_pa on WDI2.0 */
+ if (ipa3_ctx->ipa_wdi2) {
+ for (i = 0; i < IPA_UC_FINISH_MAX; i++) {
+ IPADBG("(%d) rp_value(%u), comp_wp_value(%u)\n",
+ i,
+ *ipa3_ctx->uc_ctx.rdy_ring_rp_va,
+ *ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va);
+ if (*ipa3_ctx->uc_ctx.rdy_ring_rp_va !=
+ *ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va) {
+ usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+ IPA_UC_WAII_MAX_SLEEP);
+ } else {
+ break;
+ }
+ }
+ /* In case the ipa_uc still hasn't processed all
+ * pending descriptors, we have to assert
+ */
+ if (i == IPA_UC_FINISH_MAX)
+ WARN_ON(1);
+ }
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ result = ipa3_disable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ clnt_hdl);
+ result = -EPERM;
+ goto uc_timeout;
+ }
+
+ /*
+ * To avoid a data stall during continuous SAP on/off, before
+ * setting delay on the IPA Consumer pipe remove the delay and
+ * enable HOLB on the IPA Producer pipe
+ */
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+
+ prod_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+ if (ipa3_ctx->ep[prod_hdl].valid == 1) {
+ result = ipa3_disable_data_path(prod_hdl);
+ if (result) {
+ IPAERR("disable data path failed\n");
+ IPAERR("res=%d clnt=%d\n",
+ result, prod_hdl);
+ result = -EPERM;
+ goto uc_timeout;
+ }
+ }
+ usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC,
+ IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC);
+ }
+
+ disable.params.ipa_pipe_number = clnt_hdl;
+
+ result = ipa3_uc_send_cmd(disable.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ /* Set the delay after disabling IPA Producer pipe */
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = true;
+ ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ }
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ ep->uc_offload_state &= ~IPA_WDI_ENABLED;
+ IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa3_resume_wdi_pipe() - WDI client resume
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_resume_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa3_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t resume;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa3_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+ resume.params.ipa_pipe_number = clnt_hdl;
+
+ result = ipa3_uc_send_cmd(resume.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_RESUME,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
+
+ ep->uc_offload_state |= IPA_WDI_RESUMED;
+ IPADBG("client (ep: %d) resumed\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa3_suspend_wdi_pipe() - WDI client suspend
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
+{
+ int result = 0;
+ struct ipa3_ep_context *ep;
+ union IpaHwWdiCommonChCmdData_t suspend;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa3_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+ IPA_WDI_RESUMED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+
+ suspend.params.ipa_pipe_number = clnt_hdl;
+
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ IPADBG("Post suspend event first for IPA Producer\n");
+ IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
+ result = ipa3_uc_send_cmd(suspend.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+ }
+
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) failed to suspend result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+ } else {
+ ep_cfg_ctrl.ipa_ep_delay = true;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) failed to delay result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("client (ep: %d) delayed\n", clnt_hdl);
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ result = ipa3_uc_send_cmd(suspend.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+ }
+
+ ipa3_ctx->tag_process_before_gating = true;
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ ep->uc_offload_state &= ~IPA_WDI_RESUMED;
+ IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+
+uc_timeout:
+ return result;
+}
+
+int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
+{
+ int result = 0;
+ struct ipa3_ep_context *ep;
+ union IpaHwWdiRxExtCfgCmdData_t qmap;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm, %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ result = ipa3_uc_state_check();
+ if (result)
+ return result;
+
+ IPADBG("ep=%d\n", clnt_hdl);
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ return -EFAULT;
+ }
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+ qmap.params.ipa_pipe_number = clnt_hdl;
+ qmap.params.qmap_id = qmap_id;
+
+ result = ipa3_uc_send_cmd(qmap.raw32b,
+ IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG,
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+
+ if (result) {
+ result = -EFAULT;
+ goto uc_timeout;
+ }
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
+
+uc_timeout:
+ return result;
+}
+
+/**
+ * ipa3_uc_reg_rdyCB() - register a uC ready callback if the uC is not
+ * yet ready
+ * @inout: [in/out] input/output parameters from/to the client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa3_uc_reg_rdyCB(
+ struct ipa_wdi_uc_ready_params *inout)
+{
+ int result = 0;
+
+ if (inout == NULL) {
+		IPAERR("bad parm. inout=%p\n", inout);
+ return -EINVAL;
+ }
+
+ result = ipa3_uc_state_check();
+ if (result) {
+ inout->is_uC_ready = false;
+ ipa3_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify;
+ ipa3_ctx->uc_wdi_ctx.priv = inout->priv;
+ } else {
+ inout->is_uC_ready = true;
+ }
+
+ return 0;
+}
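+
+/*
+ * Usage sketch (illustrative only; "wlan_ready_cb" and "wlan_ctx" are
+ * hypothetical client names):
+ *
+ *	struct ipa_wdi_uc_ready_params p = {
+ *		.notify = wlan_ready_cb,
+ *		.priv = wlan_ctx,
+ *	};
+ *
+ *	ipa3_uc_reg_rdyCB(&p);
+ *	if (!p.is_uC_ready)
+ *		... defer setup; wlan_ready_cb(wlan_ctx) fires once the
+ *		    uC loads ...
+ */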
+
+/**
+ * ipa3_uc_dereg_rdyCB() - de-register the uC ready callback
+ *
+ * Returns: 0 on success
+ *
+ */
+int ipa3_uc_dereg_rdyCB(void)
+{
+ ipa3_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
+ ipa3_ctx->uc_wdi_ctx.priv = NULL;
+
+ return 0;
+}
+
+/**
+ * ipa3_uc_wdi_get_dbpa() - retrieve the doorbell physical address of
+ * the wlan pipes
+ * @param: [in/out] input/output parameters from/to the client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa3_uc_wdi_get_dbpa(
+ struct ipa_wdi_db_params *param)
+{
+ if (param == NULL || param->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. param=%p\n", param);
+ if (param)
+ IPAERR("client = %d\n", param->client);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(param->client)) {
+ param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+ ipahal_get_reg_base() +
+ ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+ IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+ } else {
+ param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+ ipahal_get_reg_base() +
+ ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+ IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+ }
+
+ return 0;
+}
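+
+/*
+ * The uC mailbox register is addressed as an (m, n) pair derived from
+ * a flat mailbox index: m = index / 32, n = index % 32. For example,
+ * a start index of 48 would select IPA_UC_MAILBOX_m_n with m = 1 and
+ * n = 16. (The actual IPA_HW_WDI_*_MBOX_START_INDEX values are
+ * defined elsewhere in this driver.)
+ */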
+
+static void ipa3_uc_wdi_loaded_handler(void)
+{
+ if (!ipa3_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return;
+ }
+
+ if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb) {
+ ipa3_ctx->uc_wdi_ctx.uc_ready_cb(
+ ipa3_ctx->uc_wdi_ctx.priv);
+
+ ipa3_ctx->uc_wdi_ctx.uc_ready_cb =
+ NULL;
+ ipa3_ctx->uc_wdi_ctx.priv = NULL;
+ }
+}
+
+int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
+ int i;
+ int ret = 0;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ if (!info) {
+ IPAERR("info = %p\n", info);
+ return -EINVAL;
+ }
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_buffers; i++) {
+ IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+ &info[i].pa, info[i].iova, info[i].size);
+ info[i].result = ipa3_iommu_map(cb->iommu,
+ rounddown(info[i].iova, PAGE_SIZE),
+ rounddown(info[i].pa, PAGE_SIZE),
+ roundup(info[i].size + info[i].pa -
+ rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE),
+ prot);
+ }
+
+ return ret;
+}
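+
+/*
+ * The mapping is page granular: iova and pa are rounded down to a
+ * page boundary and the length is rounded up to cover the tail.
+ * Worked example with PAGE_SIZE = 0x1000: pa = 0x12340234 and
+ * size = 0x100 map the page at 0x12340000 with length
+ * roundup(0x100 + 0x234, 0x1000) = 0x1000.
+ */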
+
+int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
+ int i;
+ int ret = 0;
+
+ if (!info) {
+ IPAERR("info = %p\n", info);
+ return -EINVAL;
+ }
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_buffers; i++) {
+ IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+ &info[i].pa, info[i].iova, info[i].size);
+ info[i].result = iommu_unmap(cb->iommu,
+ rounddown(info[i].iova, PAGE_SIZE),
+ roundup(info[i].size + info[i].pa -
+ rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE));
+ }
+
+ return ret;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
new file mode 100644
index 0000000..a6e462f6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -0,0 +1,3639 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h> /* gen_pool_alloc() */
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_gsi.h>
+#include <linux/elf.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
+#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
+#define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
+#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967295UL)
+
+#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
+#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)
+
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
+
+/* Max pipes + ICs for TAG process */
+#define IPA_TAG_MAX_DESC (IPA3_MAX_NUM_PIPES + 6)
+
+#define IPA_TAG_SLEEP_MIN_USEC (1000)
+#define IPA_TAG_SLEEP_MAX_USEC (2000)
+#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
+#define IPA_BCR_REG_VAL_v3_0 (0x00000001)
+#define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
+#define IPA_AGGR_GRAN_MIN (1)
+#define IPA_AGGR_GRAN_MAX (32)
+#define IPA_EOT_COAL_GRAN_MIN (1)
+#define IPA_EOT_COAL_GRAN_MAX (16)
+
+#define IPA_AGGR_BYTE_LIMIT (\
+ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
+ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
+#define IPA_AGGR_PKT_LIMIT (\
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)
+
+/* In IPAv3 only endpoints 0-3 can be configured to deaggregation */
+#define IPA_EP_SUPPORTS_DEAGGR(idx) ((idx) >= 0 && (idx) <= 3)
+
+/* Configure IPA spare register 1 in order to report the correct IPA
+ * version: set bits 0, 2, 3 and 4. See SpareBits documentation.xlsx.
+ */
+#define IPA_SPARE_REG_1_VAL (0x0000081D)
+
+
+/* HPS/DPS sequencer types */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY 0x00000000
+/* DMA + DECIPHER/CIPHER */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_DEC 0x00000011
+/* Packet Processing + no decipher + uCP (for Ethernet Bridging) */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP 0x00000002
+/* Packet Processing + decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_UCP 0x00000013
+/* 2 Packet Processing pass + no decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP 0x00000004
+/* 2 Packet Processing pass + decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP 0x00000015
+/* Packet Processing + no decipher + no uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP 0x00000006
+/* Packet Processing + decipher + no uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_NO_UCP 0x00000017
+/* COMP/DECOMP */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP 0x00000020
+/* Invalid sequencer type */
+#define IPA_DPS_HPS_SEQ_TYPE_INVALID 0xFFFFFFFF
+
+#define IPA_DPS_HPS_SEQ_TYPE_IS_DMA(seq_type) \
+ (seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY || \
+ seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_DEC || \
+ seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP)
+
+#define QMB_MASTER_SELECT_DDR (0)
+#define QMB_MASTER_SELECT_PCIE (1)
+
+#define IPA_CLIENT_NOT_USED \
+ {-1, -1, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR}
+
+/* Resource group index */
+#define IPA_GROUP_UL (0)
+#define IPA_GROUP_DL (1)
+#define IPA_GROUP_DPL IPA_GROUP_DL
+#define IPA_GROUP_DIAG (2)
+#define IPA_GROUP_DMA (3)
+#define IPA_GROUP_IMM_CMD IPA_GROUP_DMA
+#define IPA_GROUP_Q6ZIP (4)
+#define IPA_GROUP_Q6ZIP_GENERAL IPA_GROUP_Q6ZIP
+#define IPA_GROUP_UC_RX_Q (5)
+#define IPA_GROUP_Q6ZIP_ENGINE IPA_GROUP_UC_RX_Q
+#define IPA_GROUP_MAX (6)
+
+enum ipa_rsrc_grp_type_src {
+ IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS,
+ IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS,
+ IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER,
+ IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
+ IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS,
+ IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+ IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+ IPA_RSRC_GRP_TYPE_SRC_MAX,
+};
+enum ipa_rsrc_grp_type_dst {
+ IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS,
+ IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS,
+ IPA_RSRC_GRP_TYPE_DST_DPS_DMARS,
+ IPA_RSRC_GRP_TYPE_DST_MAX,
+};
+enum ipa_rsrc_grp_type_rx {
+ IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ,
+ IPA_RSRC_GRP_TYPE_RX_MAX
+};
+struct rsrc_min_max {
+ u32 min;
+ u32 max;
+};
+
+static const struct rsrc_min_max ipa3_rsrc_src_grp_config
+ [IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = {
+ /*UL DL DIAG DMA Not Used uC Rx*/
+ [IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+ {3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} },
+ [IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = {
+ {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+ [IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = {
+ {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+ [IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+ {14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
+ [IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ {19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} },
+ [IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = {
+ {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+ [IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+ {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+ [IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+ {14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
+};
+static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
+ [IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
+ /*UL DL/DPL DIAG DMA Q6zip_gen Q6zip_eng*/
+ [IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+ {2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
+ [IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
+ {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+ [IPA_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+ {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
+};
+static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
+ [IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
+ /*UL DL DIAG DMA Not Used uC Rx*/
+ [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+ {16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
+};
+
+enum ipa_ver {
+ IPA_3_0,
+ IPA_VER_MAX,
+};
+
+struct ipa_ep_configuration {
+ int pipe_num;
+ int group_num;
+ bool support_flt;
+ int sequencer_type;
+ u8 qmb_master_sel;
+};
+
+static const struct ipa_ep_configuration ipa3_ep_mapping
+ [IPA_VER_MAX][IPA_CLIENT_MAX] = {
+ [IPA_3_0][IPA_CLIENT_HSIC1_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_WLAN1_PROD] = {10, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_HSIC2_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_USB2_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_HSIC3_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_USB3_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_HSIC4_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_USB4_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_HSIC5_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_USB_PROD] = {1, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_UC_USB_PROD] = {2, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_A2_EMBEDDED_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_A2_TETHERED_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_APPS_LAN_WAN_PROD] = {14, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]
+ = {22, IPA_GROUP_IMM_CMD, false,
+ IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_ODU_PROD] = {12, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_MHI_PROD] = {0, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_PCIE},
+ [IPA_3_0][IPA_CLIENT_Q6_LAN_PROD] = {9, IPA_GROUP_UL, false,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_Q6_WAN_PROD] = {5, IPA_GROUP_DL,
+ true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_Q6_CMD_PROD]
+ = {6, IPA_GROUP_IMM_CMD, false,
+ IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD] = {7, IPA_GROUP_Q6ZIP,
+ false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD] = {8, IPA_GROUP_Q6ZIP,
+ false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
+ = {12, IPA_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+ QMB_MASTER_SELECT_PCIE},
+ [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
+ = {13, IPA_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+ QMB_MASTER_SELECT_PCIE},
+	/* Only for test purposes */
+ [IPA_3_0][IPA_CLIENT_TEST_PROD] = {1, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_TEST1_PROD] = {1, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_TEST2_PROD] = {3, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_TEST3_PROD] = {12, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_TEST4_PROD] = {13, IPA_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+
+ [IPA_3_0][IPA_CLIENT_HSIC1_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_WLAN1_CONS] = {25, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_HSIC2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_USB2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_WLAN2_CONS] = {27, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_HSIC3_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_USB3_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_WLAN3_CONS] = {28, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_HSIC4_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_USB4_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_WLAN4_CONS] = {29, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_HSIC5_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_USB_CONS] = {26, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_USB_DPL_CONS] = {17, IPA_GROUP_DPL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_A2_EMBEDDED_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_A2_TETHERED_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_A5_LAN_WAN_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_APPS_LAN_CONS] = {15, IPA_GROUP_UL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_APPS_WAN_CONS] = {16, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_ODU_EMB_CONS] = {23, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_ODU_TETH_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_MHI_CONS] = {23, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE},
+ [IPA_3_0][IPA_CLIENT_Q6_LAN_CONS] = {19, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_Q6_WAN_CONS] = {18, IPA_GROUP_UL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_Q6_DUN_CONS] = {30, IPA_GROUP_DIAG,
+ false, IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS]
+ = {21, IPA_GROUP_Q6ZIP, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS]
+ = {4, IPA_GROUP_Q6ZIP, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
+ = {28, IPA_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE},
+ [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
+ = {29, IPA_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE},
+ [IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
+	/* Only for test purposes */
+ [IPA_3_0][IPA_CLIENT_TEST_CONS] = {26, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_TEST1_CONS] = {26, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_TEST2_CONS] = {27, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_TEST3_CONS] = {28, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+ [IPA_3_0][IPA_CLIENT_TEST4_CONS] = {29, IPA_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR},
+};
+
+/* This array holds information tuples of the form:
+ * {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee}
+ */
+static struct ipa_gsi_ep_config ipa_gsi_ep_info[] = {
+ {0, 0, 8, 16, 0},
+ {1, 3, 8, 16, 0},
+ {3, 5, 16, 32, 0},
+ {4, 9, 4, 4, 1},
+ {5, 0, 16, 32, 1},
+ {6, 1, 18, 28, 1},
+ {7, 2, 0, 0, 1},
+ {8, 3, 0, 0, 1},
+ {9, 4, 8, 12, 1},
+ {10, 1, 8, 16, 3},
+ {12, 9, 8, 16, 0},
+ {13, 10, 8, 16, 0},
+ {14, 11, 8, 16, 0},
+ {15, 7, 8, 12, 0},
+ {16, 8, 8, 12, 0},
+ {17, 2, 8, 12, 0},
+ {18, 5, 8, 12, 1},
+ {19, 6, 8, 12, 1},
+ {21, 8, 4, 4, 1},
+ {22, 6, 18, 28, 0},
+ {23, 1, 8, 8, 0},
+ {25, 4, 8, 8, 3},
+ {26, 12, 8, 8, 0},
+ {27, 4, 8, 8, 0},
+ {28, 13, 8, 8, 0},
+ {29, 14, 8, 8, 0},
+ {30, 7, 4, 4, 1},
+ {-1, -1, -1, -1, -1}
+};
+
+static struct msm_bus_vectors ipa_init_vectors_v3_0[] = {
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_OCIMEM,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors ipa_nominal_perf_vectors_v3_0[] = {
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 100000000,
+ .ib = 1300000000,
+ },
+ {
+ .src = MSM_BUS_MASTER_IPA,
+ .dst = MSM_BUS_SLAVE_OCIMEM,
+ .ab = 100000000,
+ .ib = 1300000000,
+ },
+};
+
+static struct msm_bus_paths ipa_usecases_v3_0[] = {
+ {
+ ARRAY_SIZE(ipa_init_vectors_v3_0),
+ ipa_init_vectors_v3_0,
+ },
+ {
+ ARRAY_SIZE(ipa_nominal_perf_vectors_v3_0),
+ ipa_nominal_perf_vectors_v3_0,
+ },
+};
+
+static struct msm_bus_scale_pdata ipa_bus_client_pdata_v3_0 = {
+ ipa_usecases_v3_0,
+ ARRAY_SIZE(ipa_usecases_v3_0),
+ .name = "ipa",
+};
+
+void ipa3_active_clients_lock(void)
+{
+ unsigned long flags;
+
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+ spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+ ipa3_ctx->ipa3_active_clients.mutex_locked = true;
+ spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+}
+
+int ipa3_active_clients_trylock(unsigned long *flags)
+{
+ spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
+ if (ipa3_ctx->ipa3_active_clients.mutex_locked) {
+ spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock,
+ *flags);
+ return 0;
+ }
+
+ return 1;
+}
+
+void ipa3_active_clients_trylock_unlock(unsigned long *flags)
+{
+ spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
+}
+
+void ipa3_active_clients_unlock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+ ipa3_ctx->ipa3_active_clients.mutex_locked = false;
+ spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+}
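+
+/*
+ * Sketch of the intended locking pattern (illustrative only): slow
+ * paths hold the mutex via ipa3_active_clients_lock(), while atomic
+ * paths use the trylock variant and back off when the mutex is held,
+ * as ipa3_suspend_resource_no_block() does below:
+ *
+ *	unsigned long flags;
+ *
+ *	if (ipa3_active_clients_trylock(&flags) == 0)
+ *		return -EPERM;	- mutex held, cannot sleep here
+ *	... touch ipa3_ctx->ipa3_active_clients.cnt ...
+ *	ipa3_active_clients_trylock_unlock(&flags);
+ */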
+
+/**
+ * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an
+ * IPA_RM resource
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ * @clients: [OUT] Empty array which will contain the list of clients. The
+ * caller must initialize this array.
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_get_clients_from_rm_resource(
+ enum ipa_rm_resource_name resource,
+ struct ipa3_client_names *clients)
+{
+ int i = 0;
+
+ if (resource < 0 ||
+ resource >= IPA_RM_RESOURCE_MAX ||
+ !clients) {
+ IPAERR("Bad parameters\n");
+ return -EINVAL;
+ }
+
+ switch (resource) {
+ case IPA_RM_RESOURCE_USB_CONS:
+ clients->names[i++] = IPA_CLIENT_USB_CONS;
+ break;
+ case IPA_RM_RESOURCE_USB_DPL_CONS:
+ clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
+ break;
+ case IPA_RM_RESOURCE_HSIC_CONS:
+ clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
+ break;
+ case IPA_RM_RESOURCE_WLAN_CONS:
+ clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
+ clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
+ clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
+ clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
+ break;
+ case IPA_RM_RESOURCE_MHI_CONS:
+ clients->names[i++] = IPA_CLIENT_MHI_CONS;
+ break;
+ case IPA_RM_RESOURCE_USB_PROD:
+ clients->names[i++] = IPA_CLIENT_USB_PROD;
+ break;
+ case IPA_RM_RESOURCE_HSIC_PROD:
+ clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
+ break;
+ case IPA_RM_RESOURCE_MHI_PROD:
+ clients->names[i++] = IPA_CLIENT_MHI_PROD;
+ break;
+ default:
+ break;
+ }
+ clients->length = i;
+
+ return 0;
+}
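+
+/*
+ * Typical use (see ipa3_suspend_resource_sync() below): zero the
+ * struct, ask for the clients of one RM resource, then walk
+ * clients.names[0..length-1]:
+ *
+ *	struct ipa3_client_names clients;
+ *
+ *	memset(&clients, 0, sizeof(clients));
+ *	ipa3_get_clients_from_rm_resource(IPA_RM_RESOURCE_WLAN_CONS,
+ *		&clients);
+ *	- clients.length is now 4: WLAN1_CONS..WLAN4_CONS
+ */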
+
+/**
+ * ipa3_should_pipe_be_suspended() - returns true when the client's pipe should
+ * be suspended during a power save scenario. False otherwise.
+ *
+ * @client: [IN] IPA client
+ */
+bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
+{
+ struct ipa3_ep_context *ep;
+ int ipa_ep_idx;
+
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ WARN_ON(1);
+ return false;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (ep->keep_ipa_awake)
+ return false;
+
+ if (client == IPA_CLIENT_USB_CONS ||
+ client == IPA_CLIENT_USB_DPL_CONS ||
+ client == IPA_CLIENT_MHI_CONS ||
+ client == IPA_CLIENT_HSIC1_CONS ||
+ client == IPA_CLIENT_WLAN1_CONS ||
+ client == IPA_CLIENT_WLAN2_CONS ||
+ client == IPA_CLIENT_WLAN3_CONS ||
+ client == IPA_CLIENT_WLAN4_CONS)
+ return true;
+
+ return false;
+}
+
+/**
+ * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+ struct ipa3_client_names clients;
+ int res;
+ int index;
+ struct ipa_ep_cfg_ctrl suspend;
+ enum ipa_client_type client;
+ int ipa_ep_idx;
+ bool pipe_suspended = false;
+
+ memset(&clients, 0, sizeof(clients));
+ res = ipa3_get_clients_from_rm_resource(resource, &clients);
+ if (res) {
+ IPAERR("Bad params.\n");
+ return res;
+ }
+
+ for (index = 0; index < clients.length; index++) {
+ client = clients.names[index];
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ res = -EINVAL;
+ continue;
+ }
+ ipa3_ctx->resume_on_connect[client] = false;
+ if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+ ipa3_should_pipe_be_suspended(client)) {
+ if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+ /* suspend endpoint */
+ memset(&suspend, 0, sizeof(suspend));
+ suspend.ipa_ep_suspend = true;
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+ pipe_suspended = true;
+ }
+ }
+ }
+ /* Sleep ~1 msec */
+ if (pipe_suspended)
+ usleep_range(1000, 2000);
+
+ /* before gating IPA clocks do TAG process */
+ ipa3_ctx->tag_process_before_gating = true;
+ IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
+
+ return 0;
+}
+
+/**
+ * ipa3_suspend_resource_no_block() - suspend client endpoints related to the
+ * IPA_RM resource and decrement active clients counter. This function is
+ * guaranteed to avoid sleeping.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+ int res;
+ struct ipa3_client_names clients;
+ int index;
+ enum ipa_client_type client;
+ struct ipa_ep_cfg_ctrl suspend;
+ int ipa_ep_idx;
+ unsigned long flags;
+ struct ipa_active_client_logging_info log_info;
+
+ if (ipa3_active_clients_trylock(&flags) == 0)
+ return -EPERM;
+ if (ipa3_ctx->ipa3_active_clients.cnt == 1) {
+ res = -EPERM;
+ goto bail;
+ }
+
+ memset(&clients, 0, sizeof(clients));
+ res = ipa3_get_clients_from_rm_resource(resource, &clients);
+ if (res) {
+ IPAERR(
+ "ipa3_get_clients_from_rm_resource() failed, name = %d.\n",
+ resource);
+ goto bail;
+ }
+
+ for (index = 0; index < clients.length; index++) {
+ client = clients.names[index];
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ res = -EINVAL;
+ continue;
+ }
+ ipa3_ctx->resume_on_connect[client] = false;
+ if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+ ipa3_should_pipe_be_suspended(client)) {
+ if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+ /* suspend endpoint */
+ memset(&suspend, 0, sizeof(suspend));
+ suspend.ipa_ep_suspend = true;
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+ }
+ }
+ }
+
+ if (res == 0) {
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+ ipa_rm_resource_str(resource));
+ ipa3_active_clients_log_dec(&log_info, true);
+ ipa3_ctx->ipa3_active_clients.cnt--;
+ IPADBG("active clients = %d\n",
+ ipa3_ctx->ipa3_active_clients.cnt);
+ }
+bail:
+ ipa3_active_clients_trylock_unlock(&flags);
+
+ return res;
+}
+
+/**
+ * ipa3_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_resume_resource(enum ipa_rm_resource_name resource)
+{
+
+ struct ipa3_client_names clients;
+ int res;
+ int index;
+ struct ipa_ep_cfg_ctrl suspend;
+ enum ipa_client_type client;
+ int ipa_ep_idx;
+
+ memset(&clients, 0, sizeof(clients));
+ res = ipa3_get_clients_from_rm_resource(resource, &clients);
+ if (res) {
+ IPAERR("ipa3_get_clients_from_rm_resource() failed.\n");
+ return res;
+ }
+
+ for (index = 0; index < clients.length; index++) {
+ client = clients.names[index];
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ res = -EINVAL;
+ continue;
+ }
+ /*
+		 * The related ep will be resumed on connect
+ * while its resource is granted
+ */
+ ipa3_ctx->resume_on_connect[client] = true;
+ IPADBG("%d will be resumed on connect.\n", client);
+ if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+ ipa3_should_pipe_be_suspended(client)) {
+ if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+ memset(&suspend, 0, sizeof(suspend));
+ suspend.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+ }
+ }
+ }
+
+ return res;
+}
+
+/**
+ * _ipa_sram_settings_read_v3_0() - Read SRAM settings from HW
+ *
+ * Returns: None
+ */
+void _ipa_sram_settings_read_v3_0(void)
+{
+ struct ipahal_reg_shared_mem_size smem_sz;
+
+ memset(&smem_sz, 0, sizeof(smem_sz));
+
+ ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+
+ ipa3_ctx->smem_restricted_bytes = smem_sz.shared_mem_baddr;
+ ipa3_ctx->smem_sz = smem_sz.shared_mem_sz;
+
+ /* reg fields are in 8B units */
+ ipa3_ctx->smem_restricted_bytes *= 8;
+ ipa3_ctx->smem_sz *= 8;
+ ipa3_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+ ipa3_ctx->hdr_tbl_lcl = 0;
+ ipa3_ctx->hdr_proc_ctx_tbl_lcl = 1;
+
+ /*
+	 * When the proc ctx table is located in internal memory,
+	 * modem entries reside first.
+ */
+ if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+ ipa3_ctx->hdr_proc_ctx_tbl.start_offset =
+ IPA_MEM_PART(modem_hdr_proc_ctx_size);
+ }
+ ipa3_ctx->ip4_rt_tbl_hash_lcl = 0;
+ ipa3_ctx->ip4_rt_tbl_nhash_lcl = 0;
+ ipa3_ctx->ip6_rt_tbl_hash_lcl = 0;
+ ipa3_ctx->ip6_rt_tbl_nhash_lcl = 0;
+ ipa3_ctx->ip4_flt_tbl_hash_lcl = 0;
+ ipa3_ctx->ip4_flt_tbl_nhash_lcl = 0;
+ ipa3_ctx->ip6_flt_tbl_hash_lcl = 0;
+ ipa3_ctx->ip6_flt_tbl_nhash_lcl = 0;
+}
+
+/**
+ * ipa3_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: route disable is not supported
+ */
+int ipa3_cfg_route(struct ipahal_reg_route *route)
+{
+
+ IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
+ route->route_dis,
+ route->route_def_pipe,
+ route->route_def_hdr_table);
+ IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
+ route->route_def_hdr_ofst,
+ route->route_frag_def_pipe);
+
+ IPADBG("default_retain_hdr=%d\n",
+ route->route_def_retain_hdr);
+
+ if (route->route_dis) {
+ IPAERR("Route disable is not supported!\n");
+ return -EPERM;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ ipahal_write_reg_fields(IPA_ROUTE, route);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * -EPERM: always; filter disable is not supported
+ */
+int ipa3_cfg_filter(u32 disable)
+{
+ IPAERR("Filter disable is not supported!\n");
+ return -EPERM;
+}
+
+/**
+ * ipa3_cfg_qsb() - Configure IPA QSB maximal reads and writes
+ *
+ * Returns: None
+ */
+void ipa3_cfg_qsb(void)
+{
+ int qsb_max_writes[2] = { 8, 2 };
+ int qsb_max_reads[2] = { 8, 8 };
+
+ ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, qsb_max_writes);
+ ipahal_write_reg_fields(IPA_QSB_MAX_READS, qsb_max_reads);
+}
+
+/**
+ * ipa3_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa3_init_hw(void)
+{
+ u32 ipa_version = 0;
+ u32 val;
+
+ /* Read IPA version and make sure we have access to the registers */
+ ipa_version = ipahal_read_reg(IPA_VERSION);
+ if (ipa_version == 0)
+ return -EFAULT;
+
+ switch (ipa3_ctx->ipa_hw_type) {
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_1:
+ val = IPA_BCR_REG_VAL_v3_0;
+ break;
+ case IPA_HW_v3_5:
+ case IPA_HW_v3_5_1:
+ val = IPA_BCR_REG_VAL_v3_5;
+ break;
+ default:
+ IPAERR("unknown HW type in dts\n");
+ return -EFAULT;
+ }
+
+ ipahal_write_reg(IPA_BCR, val);
+
+ ipa3_cfg_qsb();
+
+ return 0;
+}
+
+/**
+ * ipa3_get_hw_type_index() - Get HW type index which is used as the entry index
+ * into ipa3_ep_mapping[] array.
+ *
+ * Return value: HW type index
+ */
+u8 ipa3_get_hw_type_index(void)
+{
+ u8 hw_type_index;
+
+ switch (ipa3_ctx->ipa_hw_type) {
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_1:
+ hw_type_index = IPA_3_0;
+ break;
+ default:
+ IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
+ hw_type_index = IPA_3_0;
+ break;
+ }
+
+ return hw_type_index;
+}
+
+/**
+ * ipa3_get_ep_mapping() - provide endpoint mapping
+ * @client: client type
+ *
+ * Return value: pipe index on success, -EINVAL on a bad client type,
+ * -1 if the client is not used on this HW version
+ */
+int ipa3_get_ep_mapping(enum ipa_client_type client)
+{
+ if (client >= IPA_CLIENT_MAX || client < 0) {
+ IPAERR("Bad client number! client =%d\n", client);
+ return -EINVAL;
+ }
+
+ return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].pipe_num;
+}
+
+/**
+ * ipa3_get_gsi_ep_info() - provide gsi ep information
+ * @ipa_ep_idx: IPA endpoint index
+ *
+ * Return value: pointer to ipa_gsi_ep_info
+ */
+struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx)
+{
+ int i;
+
+ for (i = 0; ; i++) {
+ if (ipa_gsi_ep_info[i].ipa_ep_num < 0)
+ break;
+
+ if (ipa_gsi_ep_info[i].ipa_ep_num ==
+ ipa_ep_idx)
+ return &(ipa_gsi_ep_info[i]);
+ }
+
+ return NULL;
+}
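+
+/*
+ * The lookup is a linear scan over ipa_gsi_ep_info[], terminated by
+ * the {-1, ...} sentinel entry. Illustrative use (field names follow
+ * the tuple comment above the table):
+ *
+ *	struct ipa_gsi_ep_config *gsi_ep = ipa3_get_gsi_ep_info(ep_idx);
+ *
+ *	if (gsi_ep)
+ *		tlv_size = gsi_ep->ipa_if_tlv;
+ */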
+
+/**
+ * ipa_get_ep_group() - provide endpoint group by client
+ * @client: client type
+ *
+ * Return value: endpoint group
+ */
+int ipa_get_ep_group(enum ipa_client_type client)
+{
+ if (client >= IPA_CLIENT_MAX || client < 0) {
+ IPAERR("Bad client number! client =%d\n", client);
+ return -EINVAL;
+ }
+
+ return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].group_num;
+}
+
+/**
+ * ipa3_get_qmb_master_sel() - provide QMB master selection for the client
+ * @client: client type
+ *
+ * Return value: QMB master index
+ */
+u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
+{
+ if (client >= IPA_CLIENT_MAX || client < 0) {
+ IPAERR("Bad client number! client =%d\n", client);
+ return -EINVAL;
+ }
+
+ return ipa3_ep_mapping[ipa3_get_hw_type_index()]
+ [client].qmb_master_sel;
+}
+
+/**
+ * ipa3_set_client() - set the IPACM client mapping for a pipe
+ * @index: pipe index
+ * @client: client type
+ * @uplink: true if the client is in the uplink direction
+ *
+ * Return value: none
+ */
+void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
+{
+ if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
+ IPAERR("Bad client number! client =%d\n", client);
+ } else if (index >= IPA3_MAX_NUM_PIPES || index < 0) {
+ IPAERR("Bad pipe index! index =%d\n", index);
+ } else {
+ ipa3_ctx->ipacm_client[index].client_enum = client;
+ ipa3_ctx->ipacm_client[index].uplink = uplink;
+ }
+}
+
+/**
+ * ipa3_get_client() - provide the client mapping of a pipe
+ * @pipe_idx: pipe index
+ *
+ * Return value: client enum, or IPACM_CLIENT_MAX on a bad pipe index
+ */
+enum ipacm_client_enum ipa3_get_client(int pipe_idx)
+{
+ if (pipe_idx >= IPA3_MAX_NUM_PIPES || pipe_idx < 0) {
+ IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
+ return IPACM_CLIENT_MAX;
+ } else {
+ return ipa3_ctx->ipacm_client[pipe_idx].client_enum;
+ }
+}
+
+/**
+ * ipa3_get_client_uplink() - report whether a pipe's client is uplink
+ * @pipe_idx: pipe index
+ *
+ * Return value: true if the client mapped to this pipe is uplink
+ */
+bool ipa3_get_client_uplink(int pipe_idx)
+{
+ return ipa3_ctx->ipacm_client[pipe_idx].uplink;
+}
+
+/**
+ * ipa3_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
+ * the supplied pipe index.
+ *
+ * @pipe_idx:
+ *
+ * Return value: IPA_RM resource related to the pipe, negative on a bad
+ * pipe index or if no resource was found.
+ */
+enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx)
+{
+ int i;
+ int j;
+ enum ipa_client_type client;
+ struct ipa3_client_names clients;
+ bool found = false;
+
+ if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+ IPAERR("Bad pipe index!\n");
+ return -EINVAL;
+ }
+
+ client = ipa3_ctx->ep[pipe_idx].client;
+
+ for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+ memset(&clients, 0, sizeof(clients));
+ ipa3_get_clients_from_rm_resource(i, &clients);
+ for (j = 0; j < clients.length; j++) {
+ if (clients.names[j] == client) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+
+ if (!found)
+ return -EFAULT;
+
+ return i;
+}
+
+/**
+ * ipa3_get_client_mapping() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client mapping
+ */
+enum ipa_client_type ipa3_get_client_mapping(int pipe_idx)
+{
+ if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+ IPAERR("Bad pipe index!\n");
+ return -EINVAL;
+ }
+
+ return ipa3_ctx->ep[pipe_idx].client;
+}
+
+/**
+ * ipa_init_ep_flt_bitmap() - Initialize the bitmap
+ * that represents the end-points that support filtering
+ */
+void ipa_init_ep_flt_bitmap(void)
+{
+ enum ipa_client_type cl;
+ u8 hw_type_idx = ipa3_get_hw_type_index();
+ u32 bitmap;
+
+ bitmap = 0;
+
+ BUG_ON(ipa3_ctx->ep_flt_bitmap);
+
+ for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) {
+ if (ipa3_ep_mapping[hw_type_idx][cl].support_flt) {
+ bitmap |=
+ (1U<<ipa3_ep_mapping[hw_type_idx][cl].pipe_num);
+ if (bitmap != ipa3_ctx->ep_flt_bitmap) {
+ ipa3_ctx->ep_flt_bitmap = bitmap;
+ ipa3_ctx->ep_flt_num++;
+ }
+ }
+ }
+}
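+
+/*
+ * Note the comparison against the previously stored bitmap above:
+ * several clients share a pipe number (e.g. IPA_CLIENT_USB_PROD and
+ * IPA_CLIENT_TEST1_PROD both map to pipe 1), so ep_flt_num counts
+ * distinct filtering pipes rather than filtering-capable clients.
+ */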
+
+/**
+ * ipa_is_ep_support_flt() - Given an end-point, check
+ * whether it supports filtering or not.
+ *
+ * @pipe_idx:
+ *
+ * Return values:
+ * true if supports and false if not
+ */
+bool ipa_is_ep_support_flt(int pipe_idx)
+{
+ if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+ IPAERR("Bad pipe index!\n");
+ return false;
+ }
+
+ return ipa3_ctx->ep_flt_bitmap & (1U<<pipe_idx);
+}
+
+/**
+ * ipa3_cfg_ep_seq() - IPA end-point HPS/DPS sequencer type configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg)
+{
+ int type;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad param, clnt_hdl = %d", clnt_hdl);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+ IPAERR("SEQ does not apply to IPA consumer EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ /*
+	 * Skip configuring the sequencer type for test clients.
+ * These are configured dynamically in ipa3_cfg_ep_mode
+ */
+ if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
+ IPADBG("Skip sequencers configuration for test clients\n");
+ return 0;
+ }
+
+ if (seq_cfg->set_dynamic)
+ type = seq_cfg->seq_type;
+ else
+ type = ipa3_ep_mapping[ipa3_get_hw_type_index()]
+ [ipa3_ctx->ep[clnt_hdl].client].sequencer_type;
+
+ if (type != IPA_DPS_HPS_SEQ_TYPE_INVALID) {
+ if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA &&
+ !IPA_DPS_HPS_SEQ_TYPE_IS_DMA(type)) {
+ IPAERR("Configuring non-DMA SEQ type to DMA pipe\n");
+ BUG();
+ }
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+		/* Configure sequencer type */
+ IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
+ clnt_hdl);
+ ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ } else {
+ IPADBG("should not set sequencer type of ep = %d\n", clnt_hdl);
+ }
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep - IPA end-point configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a one
+ * shot API to configure the IPA end-point fully
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ int result = -EINVAL;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ result = ipa3_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
+ if (result)
+ return result;
+
+ if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
+ result = ipa3_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_seq(clnt_hdl, &ipa_ep_cfg->seq);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
+ if (result)
+ return result;
+ } else {
+ result = ipa3_cfg_ep_metadata_mask(clnt_hdl,
+ &ipa_ep_cfg->metadata_mask);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
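+
+/*
+ * Illustrative one-shot configuration (a sketch; "hdl" stands for a
+ * client handle obtained at connect time and the values are
+ * arbitrary):
+ *
+ *	struct ipa_ep_cfg cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.nat.nat_en = IPA_BYPASS_NAT;
+ *	cfg.mode.mode = IPA_BASIC;
+ *	cfg.hdr.hdr_len = 8;
+ *	if (ipa3_cfg_ep(hdl, &cfg))
+ *		... handle the error ...
+ */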
+
+const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en)
+{
+ switch (nat_en) {
+ case (IPA_BYPASS_NAT):
+ return "NAT disabled";
+ case (IPA_SRC_NAT):
+ return "Source NAT";
+ case (IPA_DST_NAT):
+ return "Dst NAT";
+ }
+
+ return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_nat: [in] IPA end-point NAT configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
+{
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl,
+ ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+ IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, nat_en=%d(%s)\n",
+ clnt_hdl,
+ ep_nat->nat_en,
+ ipa3_get_nat_en_str(ep_nat->nat_en));
+
+ /* copy over EP cfg */
+ ipa3_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_NAT_n, clnt_hdl, ep_nat);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+
+/**
+ * ipa3_cfg_ep_status() - IPA end-point status configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_status: [in] IPA end-point status configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_status(u32 clnt_hdl,
+ const struct ipahal_reg_ep_cfg_status *ep_status)
+{
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl,
+ ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, status_en=%d status_ep=%d status_location=%d\n",
+ clnt_hdl,
+ ep_status->status_en,
+ ep_status->status_ep,
+ ep_status->status_location);
+
+ /* copy over EP cfg */
+ ipa3_ctx->ep[clnt_hdl].status = *ep_status;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, clnt_hdl, ep_status);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep_cfg() - IPA end-point cfg configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @cfg: [in] IPA end-point cfg configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
+{
+ u8 qmb_master_sel;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl,
+ ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ /* copy over EP cfg */
+ ipa3_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
+
+ /* Override QMB master selection */
+ qmb_master_sel = ipa3_get_qmb_master_sel(ipa3_ctx->ep[clnt_hdl].client);
+ ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel = qmb_master_sel;
+ IPADBG(
+ "pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d gen_qmb_master_sel=%d\n",
+ clnt_hdl,
+ ipa3_ctx->ep[clnt_hdl].cfg.cfg.frag_offload_en,
+ ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_offload_en,
+ ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_metadata_hdr_offset,
+ ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel);
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CFG_n, clnt_hdl,
+ &ipa3_ctx->ep[clnt_hdl].cfg.cfg);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @metadata_mask: [in] IPA end-point metadata mask configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask
+ *metadata_mask)
+{
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl,
+ ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, metadata_mask=0x%x\n",
+ clnt_hdl,
+ metadata_mask->metadata_mask);
+
+ /* copy over EP cfg */
+ ipa3_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+ clnt_hdl, metadata_mask);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep_hdr() - IPA end-point header configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_hdr: [in] IPA end-point header configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+ struct ipa3_ep_context *ep;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+ IPADBG("pipe=%d metadata_reg_valid=%d\n",
+ clnt_hdl,
+ ep_hdr->hdr_metadata_reg_valid);
+
+ IPADBG("remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
+ ep_hdr->hdr_remove_additional,
+ ep_hdr->hdr_a5_mux,
+ ep_hdr->hdr_ofst_pkt_size);
+
+ IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
+ ep_hdr->hdr_ofst_pkt_size_valid,
+ ep_hdr->hdr_additional_const_len);
+
+	IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x\n",
+ ep_hdr->hdr_ofst_metadata,
+ ep_hdr->hdr_ofst_metadata_valid,
+ ep_hdr->hdr_len);
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ /* copy over EP cfg */
+ ep->cfg.hdr = *ep_hdr;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, &ep->cfg.hdr);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep_hdr_ext() - IPA end-point extended header configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_hdr_ext: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+ struct ipa3_ep_context *ep;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
+ clnt_hdl,
+ ep_hdr_ext->hdr_pad_to_alignment);
+
+ IPADBG("hdr_total_len_or_pad_offset=%d\n",
+ ep_hdr_ext->hdr_total_len_or_pad_offset);
+
+ IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
+ ep_hdr_ext->hdr_payload_len_inc_padding,
+ ep_hdr_ext->hdr_total_len_or_pad);
+
+ IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
+ ep_hdr_ext->hdr_total_len_or_pad_valid,
+ ep_hdr_ext->hdr_little_endian);
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ /* copy over EP cfg */
+ ep->cfg.hdr_ext = *ep_hdr_ext;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_EXT_n, clnt_hdl,
+ &ep->cfg.hdr_ext);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep_ctrl() - IPA end-point Control configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_ctrl: [in] IPA end-point control configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ep_ctrl == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
+ clnt_hdl,
+ ep_ctrl->ipa_ep_suspend,
+ ep_ctrl->ipa_ep_delay);
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, ep_ctrl);
+
+ if (ep_ctrl->ipa_ep_suspend == true &&
+ IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client))
+ ipa3_suspend_active_aggr_wa(clnt_hdl);
+
+ return 0;
+}
+
+const char *ipa3_get_mode_type_str(enum ipa_mode_type mode)
+{
+ switch (mode) {
+ case (IPA_BASIC):
+ return "Basic";
+ case (IPA_ENABLE_FRAMING_HDLC):
+ return "HDLC framing";
+ case (IPA_ENABLE_DEFRAMING_HDLC):
+ return "HDLC de-framing";
+ case (IPA_DMA):
+ return "DMA";
+ }
+
+ return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_mode: [in] IPA end-point mode configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
+{
+ int ep;
+ int type;
+ struct ipahal_reg_endp_init_mode init_mode;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
+ IPAERR("bad params clnt_hdl=%d , ep_valid=%d ep_mode=%p\n",
+ clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid,
+ ep_mode);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+ IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ ep = ipa3_get_ep_mapping(ep_mode->dst);
+ if (ep == -1 && ep_mode->mode == IPA_DMA) {
+ IPAERR("dst %d does not exist in DMA mode\n", ep_mode->dst);
+ return -EINVAL;
+ }
+
+ WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
+
+ if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
+ ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+
+	IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d\n",
+ clnt_hdl,
+ ep_mode->mode,
+ ipa3_get_mode_type_str(ep_mode->mode),
+ ep_mode->dst);
+
+ /* copy over EP cfg */
+ ipa3_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
+ ipa3_ctx->ep[clnt_hdl].dst_pipe_index = ep;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ init_mode.dst_pipe_number = ipa3_ctx->ep[clnt_hdl].dst_pipe_index;
+ init_mode.ep_mode = *ep_mode;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_MODE_n, clnt_hdl, &init_mode);
+
+	/* Configure sequencer type for test clients */
+ if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
+ if (ep_mode->mode == IPA_DMA)
+ type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY;
+ else
+ type = IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP;
+
+		IPADBG("set sequencer to sequence 0x%x, ep = %d\n", type,
+ clnt_hdl);
+ ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
+ }
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+const char *ipa3_get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
+{
+ switch (aggr_en) {
+ case (IPA_BYPASS_AGGR):
+ return "no aggregation";
+ case (IPA_ENABLE_AGGR):
+ return "aggregation enabled";
+ case (IPA_ENABLE_DEAGGR):
+ return "de-aggregation enabled";
+ }
+
+ return "undefined";
+}
+
+const char *ipa3_get_aggr_type_str(enum ipa_aggr_type aggr_type)
+{
+ switch (aggr_type) {
+ case (IPA_MBIM_16):
+ return "MBIM_16";
+ case (IPA_HDLC):
+ return "HDLC";
+ case (IPA_TLP):
+ return "TLP";
+ case (IPA_RNDIS):
+ return "RNDIS";
+ case (IPA_GENERIC):
+ return "GENERIC";
+ case (IPA_QCMAP):
+ return "QCMAP";
+ }
+ return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_aggr: [in] IPA end-point aggregation configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ if (ep_aggr->aggr_en == IPA_ENABLE_DEAGGR &&
+ !IPA_EP_SUPPORTS_DEAGGR(clnt_hdl)) {
+ IPAERR("pipe=%d cannot be configured to DEAGGR\n", clnt_hdl);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
+ clnt_hdl,
+ ep_aggr->aggr_en,
+ ipa3_get_aggr_enable_str(ep_aggr->aggr_en),
+ ep_aggr->aggr,
+ ipa3_get_aggr_type_str(ep_aggr->aggr),
+ ep_aggr->aggr_byte_limit,
+ ep_aggr->aggr_time_limit);
+ IPADBG("hard_byte_limit_en=%d aggr_sw_eof_active=%d\n",
+ ep_aggr->aggr_hard_byte_limit_en,
+ ep_aggr->aggr_sw_eof_active);
+
+ /* copy over EP cfg */
+ ipa3_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, ep_aggr);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_route: [in] IPA end-point routing configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
+{
+ struct ipahal_reg_endp_init_route init_rt;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+ IPAERR("ROUTE does not apply to IPA out EP %d\n",
+ clnt_hdl);
+ return -EINVAL;
+ }
+
+ /*
+ * if DMA mode was configured previously for this EP, return with
+ * success
+ */
+ if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+ IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
+ clnt_hdl);
+ return 0;
+ }
+
+ if (ep_route->rt_tbl_hdl)
+ IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+ IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
+ clnt_hdl,
+ ep_route->rt_tbl_hdl);
+
+ /* always use "default" routing table when programming EP ROUTE reg */
+ ipa3_ctx->ep[clnt_hdl].rt_tbl_idx =
+ IPA_MEM_PART(v4_apps_rt_index_lo);
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, clnt_hdl, &init_rt);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep_holb() - IPA end-point holb configuration
+ *
+ * If an IPA producer pipe is full, IPA HW by default will block
+ * indefinitely till space opens up. During this time no packets
+ * including those from unrelated pipes will be processed. Enabling
+ * HOLB means IPA HW will be allowed to drop packets as/when needed
+ * and indefinite blocking is avoided.
+ *
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_holb: [in] IPA end-point HOLB configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
+{
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
+ ep_holb->tmr_val > ipa3_ctx->ctrl->max_holb_tmr_val ||
+ ep_holb->en > 1) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
+ IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ ipa3_ctx->ep[clnt_hdl].holb = *ep_holb;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl,
+ ep_holb);
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, clnt_hdl,
+ ep_holb);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
+ ep_holb->tmr_val);
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep_holb_by_client() - IPA end-point holb configuration
+ *
+ * Wrapper function for ipa3_cfg_ep_holb() with client name instead of
+ * client handle. This function is used for clients that do not have a
+ * client handle.
+ *
+ * @client: [in] client name
+ * @ep_holb: [in] IPA end-point HOLB configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
+ const struct ipa_ep_cfg_holb *ep_holb)
+{
+ return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb);
+}
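+
+/*
+ * Illustrative HOLB enable on a consumer pipe (values are arbitrary;
+ * tmr_val is in HW-specific units):
+ *
+ *	struct ipa_ep_cfg_holb holb = {
+ *		.en = 1,
+ *		.tmr_val = 0,	- drop immediately when blocked
+ *	};
+ *
+ *	ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_CONS, &holb);
+ */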
+
+/**
+ * ipa3_cfg_ep_deaggr() - IPA end-point deaggregation configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_deaggr: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+ struct ipa3_ep_context *ep;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d deaggr_hdr_len=%d\n",
+ clnt_hdl,
+ ep_deaggr->deaggr_hdr_len);
+
+ IPADBG("packet_offset_valid=%d\n",
+ ep_deaggr->packet_offset_valid);
+
+ IPADBG("packet_offset_location=%d max_packet_len=%d\n",
+ ep_deaggr->packet_offset_location,
+ ep_deaggr->max_packet_len);
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ /* copy over EP cfg */
+ ep->cfg.deaggr = *ep_deaggr;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_DEAGGR_n, clnt_hdl,
+ &ep->cfg.deaggr);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep_metadata() - IPA end-point metadata configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_md: [in] IPA end-point metadata configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
+{
+ u32 qmap_id = 0;
+ struct ipa_ep_cfg_metadata ep_md_reg_wrt;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
+
+ /* copy over EP cfg */
+ ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ep_md_reg_wrt = *ep_md;
+ qmap_id = (ep_md->qmap_id <<
+ IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) &
+ IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK;
+
+ ep_md_reg_wrt.qmap_id = qmap_id;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl,
+ &ep_md_reg_wrt);
+ ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl,
+ &ipa3_ctx->ep[clnt_hdl].cfg.hdr);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
+int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+ struct ipa_ep_cfg_metadata meta;
+ struct ipa3_ep_context *ep;
+ int ipa_ep_idx;
+ int result = -EINVAL;
+
+ if (param_in->client >= IPA_CLIENT_MAX) {
+ IPAERR("bad parm client:%d\n", param_in->client);
+ goto fail;
+ }
+
+ ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ goto fail;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ if (!ep->valid) {
+ IPAERR("EP not allocated.\n");
+ goto fail;
+ }
+
+ meta.qmap_id = param_in->qmap_id;
+ if (param_in->client == IPA_CLIENT_USB_PROD ||
+ param_in->client == IPA_CLIENT_HSIC1_PROD ||
+ param_in->client == IPA_CLIENT_ODU_PROD) {
+ result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
+ } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
+ ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
+ result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
+ if (result)
+ IPAERR("qmap_id %d write failed on ep=%d\n",
+ meta.qmap_id, ipa_ep_idx);
+ result = 0;
+ }
+
+fail:
+ return result;
+}
+
+/**
+ * ipa3_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+ int i;
+ u32 *cur = (u32 *)base;
+ u8 *byt;
+
+ IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
+ for (i = 0; i < size / 4; i++) {
+ byt = (u8 *)(cur + i);
+ IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
+ byt[0], byt[1], byt[2], byt[3]);
+ }
+ IPADBG("END\n");
+}
+
+/**
+ * ipa3_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa3_pipe_mem_init(u32 start_ofst, u32 size)
+{
+ int res;
+ u32 aligned_start_ofst;
+ u32 aligned_size;
+ struct gen_pool *pool;
+
+ if (!size) {
+ IPAERR("no IPA pipe memory allocated\n");
+ goto fail;
+ }
+
+ aligned_start_ofst = IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst);
+ aligned_size = size - (aligned_start_ofst - start_ofst);
+
+ IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+ start_ofst, aligned_start_ofst, size, aligned_size);
+
+ /* allocation order of 8, i.e. 256-byte chunks, global pool */
+ pool = gen_pool_create(8, -1);
+ if (!pool) {
+ IPAERR("Failed to create a new memory pool.\n");
+ goto fail;
+ }
+
+ res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+ if (res) {
+ IPAERR("Failed to add memory to IPA pipe pool\n");
+ goto err_pool_add;
+ }
+
+ ipa3_ctx->pipe_mem_pool = pool;
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(pool);
+fail:
+ return -ENOMEM;
+}
+
+/**
+ * ipa3_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa3_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+ u32 vaddr;
+ int res = -1;
+
+ if (!ipa3_ctx->pipe_mem_pool || !size) {
+ IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+ ipa3_ctx->pipe_mem_pool);
+ return res;
+ }
+
+ vaddr = gen_pool_alloc(ipa3_ctx->pipe_mem_pool, size);
+
+ if (vaddr) {
+ *ofst = vaddr;
+ res = 0;
+ IPADBG("size=%u ofst=%u\n", size, vaddr);
+ } else {
+ IPAERR("size=%u failed\n", size);
+ }
+
+ return res;
+}
+
+/**
+ * ipa3_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa3_pipe_mem_free(u32 ofst, u32 size)
+{
+ IPADBG("size=%u ofst=%u\n", size, ofst);
+ if (ipa3_ctx->pipe_mem_pool && size)
+ gen_pool_free(ipa3_ctx->pipe_mem_pool, ofst, size);
+ return 0;
+}
+
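+/*
+ * Illustrative pool lifetime (a sketch; the offset and sizes below are
+ * hypothetical). Note that the gen_pool hands out pipe-memory offsets,
+ * not kernel virtual addresses:
+ *
+ *	u32 ofst;
+ *
+ *	ipa3_pipe_mem_init(0x1000, 0x4000);
+ *	if (!ipa3_pipe_mem_alloc(&ofst, 512)) {
+ *		use pipe memory at [ofst, ofst + 512)
+ *		ipa3_pipe_mem_free(ofst, 512);
+ *	}
+ */
+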
+/**
+ * ipa3_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode: [in] the desired aggregation mode, e.g. straight MBIM, QCNCM,
+ * etc.
+ *
+ * Returns: 0 on success
+ */
+int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+ struct ipahal_reg_qcncm qcncm;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+ qcncm.mode_en = mode;
+ ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
+ * ipa3_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_set_qcncm_ndp_sig(char sig[3])
+{
+ struct ipahal_reg_qcncm qcncm;
+
+ if (sig == NULL) {
+ IPAERR("bad argument for ipa3_set_qcncm_ndp_sig/n");
+ return -EINVAL;
+ }
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+ qcncm.mode_val = ((sig[0] << 16) | (sig[1] << 8) | sig[2]);
+ ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
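+/*
+ * Worked example of the packing above (illustrative): for the expected
+ * signature "QND", sig[] = {'Q', 'N', 'D'} = {0x51, 0x4E, 0x44}, so
+ * mode_val = (0x51 << 16) | (0x4E << 8) | 0x44 = 0x514E44. The fourth
+ * byte ('P') is added via the header addition mechanism.
+ */
+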
+/**
+ * ipa3_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable: [in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns: 0 on success
+ */
+int ipa3_set_single_ndp_per_mbim(bool enable)
+{
+ struct ipahal_reg_single_ndp_mode mode;
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+ mode.single_ndp_en = enable;
+ ipahal_write_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
+ * ipa3_straddle_boundary() - Checks whether a memory buffer straddles a
+ * boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+ u32 next_start;
+ u32 prev_end;
+
+ IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
+ next_start = (start + (boundary - 1)) & ~(boundary - 1);
+ prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+ while (next_start < prev_end)
+ next_start += boundary;
+
+ if (next_start == prev_end)
+ return 1;
+ else
+ return 0;
+}
+
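+/*
+ * Worked example (illustrative): for start=100, end=300, boundary=256,
+ * next_start = roundup(100, 256) = 256 and
+ * prev_end = roundup(300, 256) - 256 = 256; next_start == prev_end, so
+ * the buffer straddles the 256-byte boundary. For start=100, end=200,
+ * prev_end = 0 while next_start stays at 256, so there is no straddle.
+ */
+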
+/**
+ * ipa3_bam_reg_dump() - Dump selected BAM registers for IPA.
+ * The API is right now used only to dump IPA registers towards USB.
+ *
+ * Function is rate limited to avoid flooding kernel log buffer
+ */
+void ipa3_bam_reg_dump(void)
+{
+ static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
+
+ if (__ratelimit(&_rs)) {
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ pr_err("IPA BAM START\n");
+ sps_get_bam_debug_info(ipa3_ctx->bam_handle, 93,
+ (SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS))
+ |
+ SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD))),
+ 0, 2);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ }
+}
+
+/**
+ * ipa3_init_mem_partition() - Reads IPA memory map from DTS, performs alignment
+ * checks and logs the fetched values.
+ * @node: device-tree node holding the "qcom,ipa-ram-mmap" property
+ *
+ * Returns: 0 on success
+ */
+int ipa3_init_mem_partition(struct device_node *node)
+{
+ int result;
+
+ IPADBG("Reading from DTS as u32 array\n");
+ result = of_property_read_u32_array(node,
+ "qcom,ipa-ram-mmap", (u32 *)&ipa3_ctx->ctrl->mem_partition,
+ sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32));
+
+ if (result) {
+ IPAERR("Read operation failed\n");
+ return -ENODEV;
+ }
+
+ IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
+ IPA_MEM_PART(nat_size));
+
+ if (IPA_MEM_PART(uc_info_ofst) & 3) {
+ IPAERR("UC INFO OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(uc_info_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(uc_info_ofst), IPA_MEM_PART(uc_info_size));
+
+ IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+ if (IPA_MEM_PART(v4_flt_hash_ofst) & 7) {
+ IPAERR("V4 FLT HASHABLE OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(v4_flt_hash_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("V4 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v4_flt_hash_ofst),
+ IPA_MEM_PART(v4_flt_hash_size),
+ IPA_MEM_PART(v4_flt_hash_size_ddr));
+
+ if (IPA_MEM_PART(v4_flt_nhash_ofst) & 7) {
+ IPAERR("V4 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(v4_flt_nhash_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("V4 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v4_flt_nhash_ofst),
+ IPA_MEM_PART(v4_flt_nhash_size),
+ IPA_MEM_PART(v4_flt_nhash_size_ddr));
+
+ if (IPA_MEM_PART(v6_flt_hash_ofst) & 7) {
+ IPAERR("V6 FLT HASHABLE OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(v6_flt_hash_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("V6 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v6_flt_hash_ofst), IPA_MEM_PART(v6_flt_hash_size),
+ IPA_MEM_PART(v6_flt_hash_size_ddr));
+
+ if (IPA_MEM_PART(v6_flt_nhash_ofst) & 7) {
+ IPAERR("V6 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(v6_flt_nhash_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("V6 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v6_flt_nhash_ofst),
+ IPA_MEM_PART(v6_flt_nhash_size),
+ IPA_MEM_PART(v6_flt_nhash_size_ddr));
+
+ IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_rt_num_index));
+
+ IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v4_modem_rt_index_lo),
+ IPA_MEM_PART(v4_modem_rt_index_hi));
+
+ IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v4_apps_rt_index_lo),
+ IPA_MEM_PART(v4_apps_rt_index_hi));
+
+ if (IPA_MEM_PART(v4_rt_hash_ofst) & 7) {
+ IPAERR("V4 RT HASHABLE OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(v4_rt_hash_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("V4 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v4_rt_hash_ofst));
+
+ IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v4_rt_hash_size),
+ IPA_MEM_PART(v4_rt_hash_size_ddr));
+
+ if (IPA_MEM_PART(v4_rt_nhash_ofst) & 7) {
+ IPAERR("V4 RT NON-HASHABLE OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(v4_rt_nhash_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("V4 RT NON-HASHABLE OFST 0x%x\n",
+ IPA_MEM_PART(v4_rt_nhash_ofst));
+
+ IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v4_rt_nhash_size),
+ IPA_MEM_PART(v4_rt_nhash_size_ddr));
+
+ IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_rt_num_index));
+
+ IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v6_modem_rt_index_lo),
+ IPA_MEM_PART(v6_modem_rt_index_hi));
+
+ IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+ IPA_MEM_PART(v6_apps_rt_index_lo),
+ IPA_MEM_PART(v6_apps_rt_index_hi));
+
+ if (IPA_MEM_PART(v6_rt_hash_ofst) & 7) {
+ IPAERR("V6 RT HASHABLE OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(v6_rt_hash_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("V6 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v6_rt_hash_ofst));
+
+ IPADBG("V6 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v6_rt_hash_size),
+ IPA_MEM_PART(v6_rt_hash_size_ddr));
+
+ if (IPA_MEM_PART(v6_rt_nhash_ofst) & 7) {
+ IPAERR("V6 RT NON-HASHABLE OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(v6_rt_nhash_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("V6 RT NON-HASHABLE OFST 0x%x\n",
+ IPA_MEM_PART(v6_rt_nhash_ofst));
+
+ IPADBG("V6 RT NON-HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(v6_rt_nhash_size),
+ IPA_MEM_PART(v6_rt_nhash_size_ddr));
+
+ if (IPA_MEM_PART(modem_hdr_ofst) & 7) {
+ IPAERR("MODEM HDR OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(modem_hdr_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+ if (IPA_MEM_PART(apps_hdr_ofst) & 7) {
+ IPAERR("APPS HDR OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(apps_hdr_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+ IPA_MEM_PART(apps_hdr_size_ddr));
+
+ if (IPA_MEM_PART(modem_hdr_proc_ctx_ofst) & 7) {
+ IPAERR("MODEM HDR PROC CTX OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
+ IPA_MEM_PART(modem_hdr_proc_ctx_size));
+
+ if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) & 7) {
+ IPAERR("APPS HDR PROC CTX OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(apps_hdr_proc_ctx_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+ IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
+ IPA_MEM_PART(apps_hdr_proc_ctx_size),
+ IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
+
+ if (IPA_MEM_PART(modem_ofst) & 7) {
+ IPAERR("MODEM OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(modem_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+ IPA_MEM_PART(modem_size));
+
+ IPADBG("V4 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v4_flt_hash_ofst),
+ IPA_MEM_PART(apps_v4_flt_hash_size));
+
+ IPADBG("V4 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v4_flt_nhash_ofst),
+ IPA_MEM_PART(apps_v4_flt_nhash_size));
+
+ IPADBG("V6 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v6_flt_hash_ofst),
+ IPA_MEM_PART(apps_v6_flt_hash_size));
+
+ IPADBG("V6 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v6_flt_nhash_ofst),
+ IPA_MEM_PART(apps_v6_flt_nhash_size));
+
+ IPADBG("RAM END OFST 0x%x\n",
+ IPA_MEM_PART(end_ofst));
+
+ IPADBG("V4 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v4_rt_hash_ofst),
+ IPA_MEM_PART(apps_v4_rt_hash_size));
+
+ IPADBG("V4 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v4_rt_nhash_ofst),
+ IPA_MEM_PART(apps_v4_rt_nhash_size));
+
+ IPADBG("V6 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v6_rt_hash_ofst),
+ IPA_MEM_PART(apps_v6_rt_hash_size));
+
+ IPADBG("V6 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(apps_v6_rt_nhash_ofst),
+ IPA_MEM_PART(apps_v6_rt_nhash_size));
+
+ return 0;
+}
+
+/**
+ * ipa3_controller_static_bind() - set the appropriate methods for
+ * IPA Driver based on the HW version
+ *
+ * @ctrl: data structure which holds the function pointers
+ * @hw_type: the HW type in use
+ *
+ * The runtime assignments below could be avoided by using C99 designated
+ * struct initializers - a time vs. memory trade-off.
+ */
+int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
+ enum ipa_hw_type hw_type)
+{
+ ctrl->ipa_init_rt4 = _ipa_init_rt4_v3;
+ ctrl->ipa_init_rt6 = _ipa_init_rt6_v3;
+ ctrl->ipa_init_flt4 = _ipa_init_flt4_v3;
+ ctrl->ipa_init_flt6 = _ipa_init_flt6_v3;
+ ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
+ ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
+ ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
+ ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0;
+ ctrl->ipa3_commit_flt = __ipa_commit_flt_v3;
+ ctrl->ipa3_commit_rt = __ipa_commit_rt_v3;
+ ctrl->ipa3_commit_hdr = __ipa_commit_hdr_v3_0;
+ ctrl->ipa3_enable_clks = _ipa_enable_clks_v3_0;
+ ctrl->ipa3_disable_clks = _ipa_disable_clks_v3_0;
+ ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v3_0;
+ ctrl->clock_scaling_bw_threshold_nominal =
+ IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS;
+ ctrl->clock_scaling_bw_threshold_turbo =
+ IPA_V3_0_BW_THRESHOLD_TURBO_MBPS;
+ ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
+ ctrl->ipa_init_sram = _ipa_init_sram_v3_0;
+ ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
+
+ ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
+
+ return 0;
+}
+
+void ipa3_skb_recycle(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo;
+
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->data = skb->head + NET_SKB_PAD;
+ skb_reset_tail_pointer(skb);
+}
+
+int ipa3_alloc_rule_id(struct idr *rule_ids)
+{
+ /* There are two groups of rule IDs, Modem ones and Apps ones.
+ * They are distinguished by the high bit: Modem IDs have it asserted.
+ */
+ return idr_alloc(rule_ids, NULL,
+ ipahal_get_low_rule_id(), ipahal_get_rule_id_hi_bit(),
+ GFP_KERNEL);
+}
+
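+/*
+ * Illustrative sketch of the split described above, assuming (purely for
+ * the example) a 10-bit rule-ID space with bit 9 as the high bit: Apps
+ * IDs would then be allocated by idr_alloc() in [low_id, 0x200) while
+ * Modem IDs occupy [0x200, 0x400), i.e. have bit 9 asserted. The real
+ * bounds come from ipahal_get_low_rule_id() and
+ * ipahal_get_rule_id_hi_bit().
+ */
+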
+int ipa3_id_alloc(void *ptr)
+{
+ int id;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&ipa3_ctx->idr_lock);
+ id = idr_alloc(&ipa3_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
+ spin_unlock(&ipa3_ctx->idr_lock);
+ idr_preload_end();
+
+ return id;
+}
+
+void *ipa3_id_find(u32 id)
+{
+ void *ptr;
+
+ spin_lock(&ipa3_ctx->idr_lock);
+ ptr = idr_find(&ipa3_ctx->ipa_idr, id);
+ spin_unlock(&ipa3_ctx->idr_lock);
+
+ return ptr;
+}
+
+void ipa3_id_remove(u32 id)
+{
+ spin_lock(&ipa3_ctx->idr_lock);
+ idr_remove(&ipa3_ctx->ipa_idr, id);
+ spin_unlock(&ipa3_ctx->idr_lock);
+}
+
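+/*
+ * Illustrative usage of the idr wrappers above (a sketch; "obj" is a
+ * hypothetical driver object):
+ *
+ *	int id = ipa3_id_alloc(obj);
+ *
+ *	if (id < 0)
+ *		return id;
+ *	WARN_ON(ipa3_id_find(id) != obj);
+ *	ipa3_id_remove(id);
+ */
+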
+void ipa3_tag_destroy_imm(void *user1, int user2)
+{
+ ipahal_destroy_imm_cmd(user1);
+}
+
+static void ipa3_tag_free_skb(void *user1, int user2)
+{
+ dev_kfree_skb_any((struct sk_buff *)user1);
+}
+
+#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
+
+/**
+ * ipa3_tag_process() - Initiates a tag process. Incorporates the input
+ * descriptors
+ *
+ * @desc: descriptors with commands for IC
+ * @descs_num: number of descriptors in the above array
+ * @timeout: time to wait for the TAG response
+ *
+ * Note: The descriptors are copied (if there's room); the caller must
+ * free its own descriptors afterwards
+ *
+ * Return: 0 or negative in case of failure
+ */
+int ipa3_tag_process(struct ipa3_desc desc[],
+ int descs_num,
+ unsigned long timeout)
+{
+ struct ipa3_sys_context *sys;
+ struct ipa3_desc *tag_desc;
+ int desc_idx = 0;
+ struct ipahal_imm_cmd_ip_packet_init pktinit_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+ struct ipahal_imm_cmd_ip_packet_tag_status status;
+ int i;
+ struct sk_buff *dummy_skb;
+ int res;
+ struct ipa3_tag_completion *comp;
+ int ep_idx;
+
+ /* Not enough room for the required descriptors for the tag process */
+ if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
+ IPAERR("up to %d descriptors are allowed (received %d)\n",
+ IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
+ descs_num);
+ return -ENOMEM;
+ }
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+ if (-1 == ep_idx) {
+ IPAERR("Client %u is not mapped\n",
+ IPA_CLIENT_APPS_CMD_PROD);
+ return -EFAULT;
+ }
+ sys = ipa3_ctx->ep[ep_idx].sys;
+
+ tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
+ if (!tag_desc) {
+ IPAERR("failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the required descriptors from the client now */
+ if (desc) {
+ memcpy(&(tag_desc[0]), desc, descs_num *
+ sizeof(tag_desc[0]));
+ desc_idx += descs_num;
+ }
+
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ cmd_pyld = ipahal_construct_nop_imm_cmd(
+ false, IPAHAL_FULL_PIPELINE_CLEAR, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct NOP imm cmd\n");
+ res = -ENOMEM;
+ goto fail_free_tag_desc;
+ }
+ tag_desc[desc_idx].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ tag_desc[desc_idx].pyld = cmd_pyld->data;
+ tag_desc[desc_idx].len = cmd_pyld->len;
+ tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+ tag_desc[desc_idx].user1 = cmd_pyld;
+ desc_idx++;
+
+ /* IP_PACKET_INIT IC for tag status to be sent to apps */
+ pktinit_cmd.destination_pipe_index =
+ ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct ip_packet_init imm cmd\n");
+ res = -ENOMEM;
+ goto fail_free_desc;
+ }
+ tag_desc[desc_idx].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+ tag_desc[desc_idx].pyld = cmd_pyld->data;
+ tag_desc[desc_idx].len = cmd_pyld->len;
+ tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+ tag_desc[desc_idx].user1 = cmd_pyld;
+ desc_idx++;
+
+ /* status IC */
+ status.tag = IPA_COOKIE;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &status, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct ip_packet_tag_status imm cmd\n");
+ res = -ENOMEM;
+ goto fail_free_desc;
+ }
+ tag_desc[desc_idx].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+ tag_desc[desc_idx].pyld = cmd_pyld->data;
+ tag_desc[desc_idx].len = cmd_pyld->len;
+ tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+ tag_desc[desc_idx].user1 = cmd_pyld;
+ desc_idx++;
+
+ comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+ if (!comp) {
+ IPAERR("no mem\n");
+ res = -ENOMEM;
+ goto fail_free_desc;
+ }
+ init_completion(&comp->comp);
+
+ /* completion needs to be released from both here and rx handler */
+ atomic_set(&comp->cnt, 2);
+
+ /* dummy packet to send to IPA. payload is a pointer to the completion object */
+ dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
+ if (!dummy_skb) {
+ IPAERR("failed to allocate memory\n");
+ res = -ENOMEM;
+ goto fail_free_comp;
+ }
+
+ memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
+
+ tag_desc[desc_idx].pyld = dummy_skb->data;
+ tag_desc[desc_idx].len = dummy_skb->len;
+ tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
+ tag_desc[desc_idx].callback = ipa3_tag_free_skb;
+ tag_desc[desc_idx].user1 = dummy_skb;
+ desc_idx++;
+
+ /* send all descriptors to IPA with single EOT */
+ res = ipa3_send(sys, desc_idx, tag_desc, true);
+ if (res) {
+ IPAERR("failed to send TAG packets %d\n", res);
+ res = -ENOMEM;
+ goto fail_free_comp;
+ }
+ kfree(tag_desc);
+ tag_desc = NULL;
+
+ IPADBG("waiting for TAG response\n");
+ res = wait_for_completion_timeout(&comp->comp, timeout);
+ if (res == 0) {
+ IPAERR("timeout (%lu msec) on waiting for TAG response\n",
+ timeout);
+ WARN_ON(1);
+ if (atomic_dec_return(&comp->cnt) == 0)
+ kfree(comp);
+ return -ETIME;
+ }
+
+ IPADBG("TAG response arrived!\n");
+ if (atomic_dec_return(&comp->cnt) == 0)
+ kfree(comp);
+
+ /* sleep for short period to ensure IPA wrote all packets to BAM */
+ usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+ return 0;
+
+fail_free_comp:
+ kfree(comp);
+fail_free_desc:
+ /*
+ * Free only the descriptors allocated here
+ * [nop, pkt_init, status, dummy_skb].
+ * The caller is responsible for freeing its own
+ * allocations in case of failure.
+ * The min is required because we may have failed
+ * partway through the initial allocations above.
+ */
+ for (i = descs_num;
+ i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS, desc_idx); i++)
+ if (tag_desc[i].callback)
+ tag_desc[i].callback(tag_desc[i].user1,
+ tag_desc[i].user2);
+fail_free_tag_desc:
+ kfree(tag_desc);
+ return res;
+}
+
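+/*
+ * Descriptor budget example for ipa3_tag_process() (illustrative): the
+ * scratch array holds IPA_TAG_MAX_DESC entries, of which
+ * REQUIRED_TAG_PROCESS_DESCRIPTORS == 4 are reserved for the NOP,
+ * IP_PACKET_INIT, TAG_STATUS and dummy-skb descriptors, so a caller may
+ * pass in at most IPA_TAG_MAX_DESC - 4 descriptors of its own.
+ */
+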
+/**
+ * ipa3_tag_generate_force_close_desc() - generate descriptors for force close
+ * immediate command
+ *
+ * @desc: descriptors for IC
+ * @desc_size: desc array size
+ * @start_pipe: first pipe to close aggregation
+ * @end_pipe: last (non-inclusive) pipe to close aggregation
+ *
+ * Return: number of descriptors written or negative in case of failure
+ */
+static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
+ int desc_size, int start_pipe, int end_pipe)
+{
+ int i;
+ struct ipa_ep_cfg_aggr ep_aggr;
+ int desc_idx = 0;
+ int res;
+ struct ipahal_imm_cmd_register_write reg_write_agg_close;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipahal_reg_valmask valmask;
+
+ for (i = start_pipe; i < end_pipe; i++) {
+ ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr);
+ if (!ep_aggr.aggr_en)
+ continue;
+ IPADBG("Force close ep: %d\n", i);
+ if (desc_idx + 1 > desc_size) {
+ IPAERR("Internal error - no descriptors\n");
+ res = -EFAULT;
+ goto fail_no_desc;
+ }
+
+ reg_write_agg_close.skip_pipeline_clear = false;
+ reg_write_agg_close.pipeline_clear_options =
+ IPAHAL_FULL_PIPELINE_CLEAR;
+ reg_write_agg_close.offset =
+ ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+ ipahal_get_aggr_force_close_valmask(1<<i, &valmask);
+ reg_write_agg_close.value = valmask.val;
+ reg_write_agg_close.value_mask = valmask.mask;
+ cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+ &reg_write_agg_close, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct register_write imm cmd\n");
+ res = -ENOMEM;
+ goto fail_alloc_reg_write_agg_close;
+ }
+
+ desc[desc_idx].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[desc_idx].pyld = cmd_pyld->data;
+ desc[desc_idx].len = cmd_pyld->len;
+ desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ desc[desc_idx].callback = ipa3_tag_destroy_imm;
+ desc[desc_idx].user1 = cmd_pyld;
+ desc_idx++;
+ }
+
+ return desc_idx;
+
+fail_alloc_reg_write_agg_close:
+ for (i = 0; i < desc_idx; i++)
+ if (desc[i].callback)
+ desc[i].callback(desc[i].user1,
+ desc[i].user2);
+fail_no_desc:
+ return res;
+}
+
+/**
+ * ipa3_tag_aggr_force_close() - Force close aggregation
+ *
+ * @pipe_num: pipe number or -1 for all pipes
+ */
+int ipa3_tag_aggr_force_close(int pipe_num)
+{
+ struct ipa3_desc *desc;
+ int res = -1;
+ int start_pipe;
+ int end_pipe;
+ int num_descs;
+ int num_aggr_descs;
+
+ if (pipe_num < -1 || pipe_num >= (int)ipa3_ctx->ipa_num_pipes) {
+ IPAERR("Invalid pipe number %d\n", pipe_num);
+ return -EINVAL;
+ }
+
+ if (pipe_num == -1) {
+ start_pipe = 0;
+ end_pipe = ipa3_ctx->ipa_num_pipes;
+ } else {
+ start_pipe = pipe_num;
+ end_pipe = pipe_num + 1;
+ }
+
+ num_descs = end_pipe - start_pipe;
+
+ desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ IPAERR("no mem\n");
+ return -ENOMEM;
+ }
+
+ /* Force close aggregation on all valid pipes with aggregation */
+ num_aggr_descs = ipa3_tag_generate_force_close_desc(desc, num_descs,
+ start_pipe, end_pipe);
+ if (num_aggr_descs < 0) {
+ IPAERR("ipa3_tag_generate_force_close_desc failed %d\n",
+ num_aggr_descs);
+ goto fail_free_desc;
+ }
+
+ res = ipa3_tag_process(desc, num_aggr_descs,
+ IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
+
+fail_free_desc:
+ kfree(desc);
+
+ return res;
+}
+
+/**
+ * ipa3_is_ready() - check if IPA module was initialized
+ * successfully
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa3_is_ready(void)
+{
+ bool complete;
+
+ if (ipa3_ctx == NULL)
+ return false;
+ mutex_lock(&ipa3_ctx->lock);
+ complete = ipa3_ctx->ipa_initialization_complete;
+ mutex_unlock(&ipa3_ctx->lock);
+ return complete;
+}
+
+/**
+ * ipa3_is_client_handle_valid() - check if IPA client handle is valid handle
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa3_is_client_handle_valid(u32 clnt_hdl)
+{
+ return clnt_hdl < ipa3_ctx->ipa_num_pipes;
+}
+
+/**
+ * ipa3_proxy_clk_unvote() - called to remove IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa3_proxy_clk_unvote(void)
+{
+ if (ipa3_is_ready() && ipa3_ctx->q6_proxy_clk_vote_valid) {
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
+ ipa3_ctx->q6_proxy_clk_vote_valid = false;
+ }
+}
+
+/**
+ * ipa3_proxy_clk_vote() - called to add IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa3_proxy_clk_vote(void)
+{
+ if (ipa3_is_ready() && !ipa3_ctx->q6_proxy_clk_vote_valid) {
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
+ ipa3_ctx->q6_proxy_clk_vote_valid = true;
+ }
+}
+
+/**
+ * ipa3_get_smem_restr_bytes()- Return IPA smem restricted bytes
+ *
+ * Return value: u16 - number of IPA smem restricted bytes
+ */
+u16 ipa3_get_smem_restr_bytes(void)
+{
+ if (ipa3_ctx)
+ return ipa3_ctx->smem_restricted_bytes;
+
+ IPAERR("IPA Driver not initialized\n");
+
+ return 0;
+}
+
+/**
+ * ipa3_get_modem_cfg_emb_pipe_flt()- Return ipa3_ctx->modem_cfg_emb_pipe_flt
+ *
+ * Return value: true if modem configures embedded pipe flt, false otherwise
+ */
+bool ipa3_get_modem_cfg_emb_pipe_flt(void)
+{
+ if (ipa3_ctx)
+ return ipa3_ctx->modem_cfg_emb_pipe_flt;
+
+ IPAERR("IPA driver has not been initialized\n");
+
+ return false;
+}
+
+/**
+ * ipa3_get_transport_type()- Return ipa3_ctx->transport_prototype
+ *
+ * Return value: enum ipa_transport_type
+ */
+enum ipa_transport_type ipa3_get_transport_type(void)
+{
+ if (ipa3_ctx)
+ return ipa3_ctx->transport_prototype;
+
+ IPAERR("IPA driver has not been initialized\n");
+ return IPA_TRANSPORT_TYPE_GSI;
+}
+
+u32 ipa3_get_num_pipes(void)
+{
+ return ipahal_read_reg(IPA_ENABLED_PIPES);
+}
+
+/**
+ * ipa3_disable_apps_wan_cons_deaggr()-
+ * set ipa3_ctx->ipa_client_apps_wan_cons_agg_gro
+ *
+ * Return value: 0 or negative in case of failure
+ */
+int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
+{
+ int res = -1;
+ u32 limit;
+
+ /* checking if IPA-HW can support */
+ limit = ipahal_aggr_get_max_byte_limit();
+ if ((agg_size >> 10) > limit) {
+ IPAERR("IPA-AGG byte limit %d\n", limit);
+ IPAERR("exceed aggr_byte_limit\n");
+ return res;
+ }
+ limit = ipahal_aggr_get_max_pkt_limit();
+ if (agg_count > limit) {
+ IPAERR("IPA-AGG pkt limit %d\n", limit);
+ IPAERR("exceed aggr_pkt_limit\n");
+ return res;
+ }
+
+ if (ipa3_ctx) {
+ ipa3_ctx->ipa_client_apps_wan_cons_agg_gro = true;
+ return 0;
+ }
+ return res;
+}
+
+static void *ipa3_get_ipc_logbuf(void)
+{
+ if (ipa3_ctx)
+ return ipa3_ctx->logbuf;
+
+ return NULL;
+}
+
+static void *ipa3_get_ipc_logbuf_low(void)
+{
+ if (ipa3_ctx)
+ return ipa3_ctx->logbuf_low;
+
+ return NULL;
+}
+
+static void ipa3_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+ *holb = ipa3_ctx->ep[ep_idx].holb;
+}
+
+static void ipa3_set_tag_process_before_gating(bool val)
+{
+ ipa3_ctx->tag_process_before_gating = val;
+}
+
+int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+ struct ipa_api_controller *api_ctrl)
+{
+ if (ipa_hw_type < IPA_HW_v3_0) {
+ IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ api_ctrl->ipa_connect = ipa3_connect;
+ api_ctrl->ipa_disconnect = ipa3_disconnect;
+ api_ctrl->ipa_reset_endpoint = ipa3_reset_endpoint;
+ api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay;
+ api_ctrl->ipa_disable_endpoint = NULL;
+ api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
+ api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat;
+ api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr;
+ api_ctrl->ipa_cfg_ep_hdr_ext = ipa3_cfg_ep_hdr_ext;
+ api_ctrl->ipa_cfg_ep_mode = ipa3_cfg_ep_mode;
+ api_ctrl->ipa_cfg_ep_aggr = ipa3_cfg_ep_aggr;
+ api_ctrl->ipa_cfg_ep_deaggr = ipa3_cfg_ep_deaggr;
+ api_ctrl->ipa_cfg_ep_route = ipa3_cfg_ep_route;
+ api_ctrl->ipa_cfg_ep_holb = ipa3_cfg_ep_holb;
+ api_ctrl->ipa_get_holb = ipa3_get_holb;
+ api_ctrl->ipa_set_tag_process_before_gating =
+ ipa3_set_tag_process_before_gating;
+ api_ctrl->ipa_cfg_ep_cfg = ipa3_cfg_ep_cfg;
+ api_ctrl->ipa_cfg_ep_metadata_mask = ipa3_cfg_ep_metadata_mask;
+ api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client;
+ api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl;
+ api_ctrl->ipa_add_hdr = ipa3_add_hdr;
+ api_ctrl->ipa_del_hdr = ipa3_del_hdr;
+ api_ctrl->ipa_commit_hdr = ipa3_commit_hdr;
+ api_ctrl->ipa_reset_hdr = ipa3_reset_hdr;
+ api_ctrl->ipa_get_hdr = ipa3_get_hdr;
+ api_ctrl->ipa_put_hdr = ipa3_put_hdr;
+ api_ctrl->ipa_copy_hdr = ipa3_copy_hdr;
+ api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx;
+ api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx;
+ api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule;
+ api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule;
+ api_ctrl->ipa_commit_rt = ipa3_commit_rt;
+ api_ctrl->ipa_reset_rt = ipa3_reset_rt;
+ api_ctrl->ipa_get_rt_tbl = ipa3_get_rt_tbl;
+ api_ctrl->ipa_put_rt_tbl = ipa3_put_rt_tbl;
+ api_ctrl->ipa_query_rt_index = ipa3_query_rt_index;
+ api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule;
+ api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule;
+ api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule;
+ api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule;
+ api_ctrl->ipa_commit_flt = ipa3_commit_flt;
+ api_ctrl->ipa_reset_flt = ipa3_reset_flt;
+ api_ctrl->allocate_nat_device = ipa3_allocate_nat_device;
+ api_ctrl->ipa_nat_init_cmd = ipa3_nat_init_cmd;
+ api_ctrl->ipa_nat_dma_cmd = ipa3_nat_dma_cmd;
+ api_ctrl->ipa_nat_del_cmd = ipa3_nat_del_cmd;
+ api_ctrl->ipa_send_msg = ipa3_send_msg;
+ api_ctrl->ipa_register_pull_msg = ipa3_register_pull_msg;
+ api_ctrl->ipa_deregister_pull_msg = ipa3_deregister_pull_msg;
+ api_ctrl->ipa_register_intf = ipa3_register_intf;
+ api_ctrl->ipa_register_intf_ext = ipa3_register_intf_ext;
+ api_ctrl->ipa_deregister_intf = ipa3_deregister_intf;
+ api_ctrl->ipa_set_aggr_mode = ipa3_set_aggr_mode;
+ api_ctrl->ipa_set_qcncm_ndp_sig = ipa3_set_qcncm_ndp_sig;
+ api_ctrl->ipa_set_single_ndp_per_mbim = ipa3_set_single_ndp_per_mbim;
+ api_ctrl->ipa_tx_dp = ipa3_tx_dp;
+ api_ctrl->ipa_tx_dp_mul = ipa3_tx_dp_mul;
+ api_ctrl->ipa_free_skb = ipa3_free_skb;
+ api_ctrl->ipa_setup_sys_pipe = ipa3_setup_sys_pipe;
+ api_ctrl->ipa_teardown_sys_pipe = ipa3_teardown_sys_pipe;
+ api_ctrl->ipa_sys_setup = ipa3_sys_setup;
+ api_ctrl->ipa_sys_teardown = ipa3_sys_teardown;
+ api_ctrl->ipa_sys_update_gsi_hdls = ipa3_sys_update_gsi_hdls;
+ api_ctrl->ipa_connect_wdi_pipe = ipa3_connect_wdi_pipe;
+ api_ctrl->ipa_disconnect_wdi_pipe = ipa3_disconnect_wdi_pipe;
+ api_ctrl->ipa_enable_wdi_pipe = ipa3_enable_wdi_pipe;
+ api_ctrl->ipa_disable_wdi_pipe = ipa3_disable_wdi_pipe;
+ api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
+ api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
+ api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
+ api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
+ api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
+ api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
+ api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
+ api_ctrl->teth_bridge_init = ipa3_teth_bridge_init;
+ api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect;
+ api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect;
+ api_ctrl->ipa_set_client = ipa3_set_client;
+ api_ctrl->ipa_get_client = ipa3_get_client;
+ api_ctrl->ipa_get_client_uplink = ipa3_get_client_uplink;
+ api_ctrl->ipa_dma_init = ipa3_dma_init;
+ api_ctrl->ipa_dma_enable = ipa3_dma_enable;
+ api_ctrl->ipa_dma_disable = ipa3_dma_disable;
+ api_ctrl->ipa_dma_sync_memcpy = ipa3_dma_sync_memcpy;
+ api_ctrl->ipa_dma_async_memcpy = ipa3_dma_async_memcpy;
+ api_ctrl->ipa_dma_uc_memcpy = ipa3_dma_uc_memcpy;
+ api_ctrl->ipa_dma_destroy = ipa3_dma_destroy;
+ api_ctrl->ipa_mhi_init_engine = ipa3_mhi_init_engine;
+ api_ctrl->ipa_connect_mhi_pipe = ipa3_connect_mhi_pipe;
+ api_ctrl->ipa_disconnect_mhi_pipe = ipa3_disconnect_mhi_pipe;
+ api_ctrl->ipa_mhi_stop_gsi_channel = ipa3_mhi_stop_gsi_channel;
+ api_ctrl->ipa_uc_mhi_reset_channel = ipa3_uc_mhi_reset_channel;
+ api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
+ ipa3_qmi_enable_force_clear_datapath_send;
+ api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
+ ipa3_qmi_disable_force_clear_datapath_send;
+ api_ctrl->ipa_mhi_reset_channel_internal =
+ ipa3_mhi_reset_channel_internal;
+ api_ctrl->ipa_mhi_start_channel_internal =
+ ipa3_mhi_start_channel_internal;
+ api_ctrl->ipa_mhi_query_ch_info = ipa3_mhi_query_ch_info;
+ api_ctrl->ipa_mhi_resume_channels_internal =
+ ipa3_mhi_resume_channels_internal;
+ api_ctrl->ipa_has_open_aggr_frame = ipa3_has_open_aggr_frame;
+ api_ctrl->ipa_mhi_destroy_channel = ipa3_mhi_destroy_channel;
+ api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
+ ipa3_uc_mhi_send_dl_ul_sync_info;
+ api_ctrl->ipa_uc_mhi_init = ipa3_uc_mhi_init;
+ api_ctrl->ipa_uc_mhi_suspend_channel = ipa3_uc_mhi_suspend_channel;
+ api_ctrl->ipa_uc_mhi_stop_event_update_channel =
+ ipa3_uc_mhi_stop_event_update_channel;
+ api_ctrl->ipa_uc_mhi_cleanup = ipa3_uc_mhi_cleanup;
+ api_ctrl->ipa_uc_state_check = ipa3_uc_state_check;
+ api_ctrl->ipa_write_qmap_id = ipa3_write_qmap_id;
+ api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler;
+ api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler;
+ api_ctrl->ipa_restore_suspend_handler = ipa3_restore_suspend_handler;
+ api_ctrl->ipa_bam_reg_dump = ipa3_bam_reg_dump;
+ api_ctrl->ipa_get_ep_mapping = ipa3_get_ep_mapping;
+ api_ctrl->ipa_is_ready = ipa3_is_ready;
+ api_ctrl->ipa_proxy_clk_vote = ipa3_proxy_clk_vote;
+ api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote;
+ api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid;
+ api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping;
+ api_ctrl->ipa_get_rm_resource_from_ep = ipa3_get_rm_resource_from_ep;
+ api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
+ ipa3_get_modem_cfg_emb_pipe_flt;
+ api_ctrl->ipa_get_transport_type = ipa3_get_transport_type;
+ api_ctrl->ipa_ap_suspend = ipa3_ap_suspend;
+ api_ctrl->ipa_ap_resume = ipa3_ap_resume;
+ api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain;
+ api_ctrl->ipa_disable_apps_wan_cons_deaggr =
+ ipa3_disable_apps_wan_cons_deaggr;
+ api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev;
+ api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping;
+ api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
+ api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
+ api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
+ api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
+ api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
+ api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
+ api_ctrl->ipa_inc_client_enable_clks_no_block =
+ ipa3_inc_client_enable_clks_no_block;
+ api_ctrl->ipa_suspend_resource_no_block =
+ ipa3_suspend_resource_no_block;
+ api_ctrl->ipa_resume_resource = ipa3_resume_resource;
+ api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync;
+ api_ctrl->ipa_set_required_perf_profile =
+ ipa3_set_required_perf_profile;
+ api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf;
+ api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
+ api_ctrl->ipa_rx_poll = ipa3_rx_poll;
+ api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
+ api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes;
+ api_ctrl->ipa_tear_down_uc_offload_pipes =
+ ipa3_tear_down_uc_offload_pipes;
+
+ return 0;
+}
+
+/**
+ * ipa_is_modem_pipe()- Checks if pipe is owned by the modem
+ *
+ * @pipe_idx: pipe number
+ * Return value: true if owned by modem, false otherwise
+ */
+bool ipa_is_modem_pipe(int pipe_idx)
+{
+ int client_idx;
+
+ if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+ IPAERR("Bad pipe index!\n");
+ return false;
+ }
+
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ if (!IPA_CLIENT_IS_Q6_CONS(client_idx) &&
+ !IPA_CLIENT_IS_Q6_PROD(client_idx))
+ continue;
+ if (ipa3_get_ep_mapping(client_idx) == pipe_idx)
+ return true;
+ }
+
+ return false;
+}
+
+static void ipa3_write_rsrc_grp_type_reg(int group_index,
+ enum ipa_rsrc_grp_type_src n, bool src,
+ struct ipahal_reg_rsrc_grp_cfg *val)
+{
+ if (src) {
+ switch (group_index) {
+ case IPA_GROUP_UL:
+ case IPA_GROUP_DL:
+ ipahal_write_reg_n_fields(
+ IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+ n, val);
+ break;
+ case IPA_GROUP_DIAG:
+ case IPA_GROUP_DMA:
+ ipahal_write_reg_n_fields(
+ IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+ n, val);
+ break;
+ case IPA_GROUP_Q6ZIP:
+ case IPA_GROUP_UC_RX_Q:
+ ipahal_write_reg_n_fields(
+ IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+ n, val);
+ break;
+ default:
+ IPAERR("Invalid source resource group, index #%d\n",
+ group_index);
+ break;
+ }
+ } else {
+ switch (group_index) {
+ case IPA_GROUP_UL:
+ case IPA_GROUP_DL:
+ ipahal_write_reg_n_fields(
+ IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+ n, val);
+ break;
+ case IPA_GROUP_DIAG:
+ case IPA_GROUP_DMA:
+ ipahal_write_reg_n_fields(
+ IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+ n, val);
+ break;
+ case IPA_GROUP_Q6ZIP_GENERAL:
+ case IPA_GROUP_Q6ZIP_ENGINE:
+ ipahal_write_reg_n_fields(
+ IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+ n, val);
+ break;
+ default:
+ IPAERR("Invalid destination resource group, index #%d\n",
+ group_index);
+ break;
+ }
+ }
+}
+
+static void ipa3_configure_rx_hps_clients(int depth, bool min)
+{
+ int i;
+ struct ipahal_reg_rx_hps_clients val;
+
+ /*
+ * depth 0 contains the first 4 clients out of 6
+ * depth 1 contains the last 2 clients out of 6
+ */
+ for (i = 0; i < (depth ? 2 : 4); i++) {
+ if (min)
+ val.client_minmax[i] =
+ ipa3_rsrc_rx_grp_config
+ [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
+ [!depth ? i : 4 + i].min;
+ else
+ val.client_minmax[i] =
+ ipa3_rsrc_rx_grp_config
+ [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
+ [!depth ? i : 4 + i].max;
+ }
+ if (depth) {
+ ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 :
+ IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+ &val);
+ } else {
+ ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 :
+ IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+ &val);
+ }
+}
+
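+/*
+ * Mapping example for the depth parameter above (illustrative): with six
+ * RX_HPS CMDQ clients, depth 0 fills client_minmax[0..3] from config
+ * entries 0..3 and depth 1 fills client_minmax[0..1] from entries 4..5,
+ * matching the DEPTH_0/DEPTH_1 register split.
+ */
+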
+void ipa3_set_resorce_groups_min_max_limits(void)
+{
+ int i;
+ int j;
+ struct ipahal_reg_rsrc_grp_cfg val;
+
+ IPADBG("ENTER\n");
+ IPADBG("Assign source rsrc groups min-max limits\n");
+
+ for (i = 0; i < IPA_RSRC_GRP_TYPE_SRC_MAX; i++) {
+ for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
+ val.x_min = ipa3_rsrc_src_grp_config[i][j].min;
+ val.x_max = ipa3_rsrc_src_grp_config[i][j].max;
+ val.y_min = ipa3_rsrc_src_grp_config[i][j + 1].min;
+ val.y_max = ipa3_rsrc_src_grp_config[i][j + 1].max;
+ ipa3_write_rsrc_grp_type_reg(j, i, true, &val);
+ }
+ }
+
+ IPADBG("Assign destination rsrc groups min-max limits\n");
+
+ for (i = 0; i < IPA_RSRC_GRP_TYPE_DST_MAX; i++) {
+ for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
+ val.x_min = ipa3_rsrc_dst_grp_config[i][j].min;
+ val.x_max = ipa3_rsrc_dst_grp_config[i][j].max;
+ val.y_min = ipa3_rsrc_dst_grp_config[i][j + 1].min;
+ val.y_max = ipa3_rsrc_dst_grp_config[i][j + 1].max;
+ ipa3_write_rsrc_grp_type_reg(j, i, false, &val);
+ }
+ }
+
+ /* move resource group configuration from HLOS to TZ */
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+ IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n");
+ return;
+ }
+
+ IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n");
+
+ ipa3_configure_rx_hps_clients(0, true);
+ ipa3_configure_rx_hps_clients(1, true);
+ ipa3_configure_rx_hps_clients(0, false);
+ ipa3_configure_rx_hps_clients(1, false);
+
+ IPADBG("EXIT\n");
+}
+
+static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
+{
+ bool empty;
+
+ IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
+ gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
+ gsi_is_channel_empty(ep->gsi_chan_hdl, &empty);
+ if (!empty) {
+ IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
+ /* queue work to start polling if we don't have one already */
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+ if (!atomic_read(&ep->sys->curr_polling_state)) {
+ atomic_set(&ep->sys->curr_polling_state, 1);
+ queue_work(ep->sys->wq, &ep->sys->work);
+ }
+ }
+}
+
+void ipa3_suspend_apps_pipes(bool suspend)
+{
+ struct ipa_ep_cfg_ctrl cfg;
+ int ipa_ep_idx;
+ struct ipa3_ep_context *ep;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.ipa_ep_suspend = suspend;
+
+ ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ if (ep->valid) {
+ IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
+ ipa_ep_idx);
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ if (suspend)
+ ipa3_gsi_poll_after_suspend(ep);
+ else if (!atomic_read(&ep->sys->curr_polling_state))
+ gsi_config_channel_mode(ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_CALLBACK);
+ }
+
+ ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+ /* Considering the case for SSR. */
+ if (ipa_ep_idx == -1) {
+ IPADBG("Invalid client.\n");
+ return;
+ }
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ if (ep->valid) {
+ IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
+ ipa_ep_idx);
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ if (suspend)
+ ipa3_gsi_poll_after_suspend(ep);
+ else if (!atomic_read(&ep->sys->curr_polling_state))
+ gsi_config_channel_mode(ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_CALLBACK);
+ }
+}
+
+/**
+ * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
+ *
+ * Send a DMA_TASK of 1B to IPA to unblock GSI channel in STOP_IN_PROG.
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa3_inject_dma_task_for_gsi(void)
+{
+ static struct ipa_mem_buffer mem = {0};
+ struct ipahal_imm_cmd_dma_task_32b_addr cmd = {0};
+ static struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipa3_desc desc = {0};
+
+ /* allocate the memory only for the very first time */
+ if (!mem.base) {
+ IPADBG("Allocate mem\n");
+ mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE;
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+ mem.size,
+ &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("no mem\n");
+ return -EFAULT;
+ }
+ }
+ if (!cmd_pyld) {
+ cmd.flsh = 1;
+ cmd.size1 = mem.size;
+ cmd.addr1 = mem.phys_base;
+ cmd.packet_size = mem.size;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_task_32b_addr cmd\n");
+ return -EFAULT;
+ }
+ }
+
+ desc.opcode = ipahal_imm_cmd_get_opcode_param(
+ IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1);
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ desc.type = IPA_IMM_CMD_DESC;
+
+ IPADBG("sending 1B packet to IPA\n");
+ if (ipa3_send_cmd(1, &desc)) {
+ IPAERR("ipa3_send_cmd failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA
+ * @chan_hdl: GSI channel handle
+ *
+ * This function implements the sequence to stop a GSI channel
+ * in IPA. This function returns when the channel is in STOP state.
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa3_stop_gsi_channel(u32 clnt_hdl)
+{
+ struct ipa_mem_buffer mem;
+ int res = 0;
+ int i;
+ struct ipa3_ep_context *ep;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ memset(&mem, 0, sizeof(mem));
+
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ res = gsi_stop_channel(ep->gsi_chan_hdl);
+ goto end_sequence;
+ }
+
+ for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+ IPADBG("Calling gsi_stop_channel\n");
+ res = gsi_stop_channel(ep->gsi_chan_hdl);
+ IPADBG("gsi_stop_channel returned %d\n", res);
+ if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
+ goto end_sequence;
+
+ IPADBG("Inject a DMA_TASK with 1B packet to IPA and retry\n");
+ /* Send a 1B packet DMA_TASK to IPA and try again */
+ res = ipa3_inject_dma_task_for_gsi();
+ if (res) {
+ IPAERR("Failed to inject DMA TASk for GSI\n");
+ goto end_sequence;
+ }
+
+ /* sleep for short period to flush IPA */
+ usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+ IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+ }
+
+ IPAERR("Failed to stop GSI channel with retries\n");
+ res = -EFAULT;
+end_sequence:
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return res;
+}
+
+/**
+ * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ *
+ * Return value: 0 on success, negative otherwise
+ *
+ */
+int ipa3_load_fws(const struct firmware *firmware)
+{
+ const struct elf32_hdr *ehdr;
+ const struct elf32_phdr *phdr;
+ const uint8_t *elf_phdr_ptr;
+ uint32_t *elf_data_ptr;
+ int phdr_idx, index;
+ uint32_t *fw_mem_base;
+
+ ehdr = (struct elf32_hdr *) firmware->data;
+
+ elf_phdr_ptr = firmware->data + sizeof(*ehdr);
+
+ for (phdr_idx = 0; phdr_idx < ehdr->e_phnum; phdr_idx++) {
+ /*
+ * The ELF program header will contain the starting
+ * address to which the firmware needs to be copied.
+ */
+ phdr = (struct elf32_phdr *)elf_phdr_ptr;
+
+ /*
+ * p_vaddr will contain the starting address to which the
+ * FW needs to be loaded.
+ * p_memsz will contain the size of the IRAM.
+ * p_filesz will contain the size of the FW image.
+ */
+ fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz);
+ if (!fw_mem_base) {
+ IPAERR("Failed to map 0x%x for the size of %u\n",
+ phdr->p_vaddr, phdr->p_memsz);
+ return -ENOMEM;
+ }
+
+ /* Set the entire region to 0s */
+ memset(fw_mem_base, 0, phdr->p_memsz);
+
+ /*
+ * p_offset will contain an absolute offset from the beginning
+ * of the ELF file.
+ */
+ elf_data_ptr = (uint32_t *)
+ ((uint8_t *)firmware->data + phdr->p_offset);
+
+ if (phdr->p_memsz % sizeof(uint32_t)) {
+ IPAERR("FW size %u doesn't align to 32bit\n",
+ phdr->p_memsz);
+ return -EFAULT;
+ }
+
+ /* Write the FW */
+ for (index = 0; index < phdr->p_filesz/sizeof(uint32_t);
+ index++) {
+ writel_relaxed(*elf_data_ptr, &fw_mem_base[index]);
+ elf_data_ptr++;
+ }
+
+ iounmap(fw_mem_base);
+
+ elf_phdr_ptr = elf_phdr_ptr + sizeof(*phdr);
+ }
+ IPADBG("IPA FWs (GSI FW, HPS and DPS) were loaded\n");
+ return 0;
+}
+
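+/*
+ * ELF walk arithmetic used above (illustrative): program header i is
+ * located at firmware->data + sizeof(struct elf32_hdr) +
+ * i * sizeof(struct elf32_phdr); its payload starts at
+ * firmware->data + phdr->p_offset, and p_filesz bytes of it are written
+ * into the p_memsz-sized window ioremapped at p_vaddr.
+ */
+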
+/**
+ * ipa3_is_msm_device() - Is the running device an MSM or MDM?
+ * Determine according to IPA version
+ *
+ * Return value: true if MSM, false if MDM
+ *
+ */
+bool ipa3_is_msm_device(void)
+{
+ switch (ipa3_ctx->ipa_hw_type) {
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_5:
+ return false;
+ case IPA_HW_v3_1:
+ case IPA_HW_v3_5_1:
+ return true;
+ default:
+ IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type);
+ ipa_assert();
+ }
+
+ return false;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
new file mode 100644
index 0000000..b945eb06
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA3) += ipa_hal.o
+
+ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
new file mode 100644
index 0000000..c88b104
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -0,0 +1,1359 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include "ipahal.h"
+#include "ipahal_i.h"
+#include "ipahal_reg_i.h"
+#include "ipahal_fltrt_i.h"
+
+struct ipahal_context *ipahal_ctx;
+
+static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
+ __stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT),
+ __stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT),
+ __stringify(IPA_IMM_CMD_IP_V4_NAT_INIT),
+ __stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT),
+ __stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT),
+ __stringify(IPA_IMM_CMD_HDR_INIT_LOCAL),
+ __stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM),
+ __stringify(IPA_IMM_CMD_REGISTER_WRITE),
+ __stringify(IPA_IMM_CMD_NAT_DMA),
+ __stringify(IPA_IMM_CMD_IP_PACKET_INIT),
+ __stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
+ __stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
+ __stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
+};
+
+static const char *ipahal_pkt_status_exception_to_str
+ [IPAHAL_PKT_STATUS_EXCEPTION_MAX] = {
+ __stringify(IPAHAL_PKT_STATUS_EXCEPTION_NONE),
+ __stringify(IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR),
+ __stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE),
+ __stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH),
+ __stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD),
+ __stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS),
+ __stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT),
+ __stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT),
+};
+
+#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
+ (kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
+
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_dma_task_32b_addr *data;
+ struct ipahal_imm_cmd_dma_task_32b_addr *dma_params =
+ (struct ipahal_imm_cmd_dma_task_32b_addr *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;
+
+ if (unlikely(dma_params->size1 & ~0xFFFF)) {
+ IPAHAL_ERR("Size1 is bigger than 16bit width 0x%x\n",
+ dma_params->size1);
+ WARN_ON(1);
+ }
+ if (unlikely(dma_params->packet_size & ~0xFFFF)) {
+ IPAHAL_ERR("Pkt size is bigger than 16bit width 0x%x\n",
+ dma_params->packet_size);
+ WARN_ON(1);
+ }
+ data->cmplt = dma_params->cmplt ? 1 : 0;
+ data->eof = dma_params->eof ? 1 : 0;
+ data->flsh = dma_params->flsh ? 1 : 0;
+ data->lock = dma_params->lock ? 1 : 0;
+ data->unlock = dma_params->unlock ? 1 : 0;
+ data->size1 = dma_params->size1;
+ data->addr1 = dma_params->addr1;
+ data->packet_size = dma_params->packet_size;
+
+ return pyld;
+}
+
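+/*
+ * Allocation layout note (applies to the imm cmd construct helpers in
+ * this file): the HW command image is carved out of the same allocation
+ * as the pyld header, i.e. [struct ipahal_imm_cmd_pyld][HW image], with
+ * pyld->data pointing at the image and pyld->len holding its size.
+ */
+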
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_packet_tag_status *data;
+ struct ipahal_imm_cmd_ip_packet_tag_status *tag_params =
+ (struct ipahal_imm_cmd_ip_packet_tag_status *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;
+
+ if (unlikely(tag_params->tag & ~0xFFFFFFFFFFFF)) {
+ IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n",
+ tag_params->tag);
+ WARN_ON(1);
+ }
+ data->tag = tag_params->tag;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_dma_shared_mem *data;
+ struct ipahal_imm_cmd_dma_shared_mem *mem_params =
+ (struct ipahal_imm_cmd_dma_shared_mem *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;
+
+ if (unlikely(mem_params->size & ~0xFFFF)) {
+ IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
+ mem_params->size);
+ WARN_ON(1);
+ }
+ if (unlikely(mem_params->local_addr & ~0xFFFF)) {
+ IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
+ mem_params->local_addr);
+ WARN_ON(1);
+ }
+ data->direction = mem_params->is_read ? 1 : 0;
+ data->size = mem_params->size;
+ data->local_addr = mem_params->local_addr;
+ data->system_addr = mem_params->system_addr;
+ data->skip_pipeline_clear = mem_params->skip_pipeline_clear ? 1 : 0;
+ switch (mem_params->pipeline_clear_options) {
+ case IPAHAL_HPS_CLEAR:
+ data->pipeline_clear_options = 0;
+ break;
+ case IPAHAL_SRC_GRP_CLEAR:
+ data->pipeline_clear_options = 1;
+ break;
+ case IPAHAL_FULL_PIPELINE_CLEAR:
+ data->pipeline_clear_options = 2;
+ break;
+ default:
+ IPAHAL_ERR("unsupported pipline clear option %d\n",
+ mem_params->pipeline_clear_options);
+ WARN_ON(1);
+ }
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_register_write *data;
+ struct ipahal_imm_cmd_register_write *regwrt_params =
+ (struct ipahal_imm_cmd_register_write *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_register_write *)pyld->data;
+
+ if (unlikely(regwrt_params->offset & ~0xFFFF)) {
+ IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
+ regwrt_params->offset);
+ WARN_ON(1);
+ }
+ data->offset = regwrt_params->offset;
+ data->value = regwrt_params->value;
+ data->value_mask = regwrt_params->value_mask;
+
+ data->skip_pipeline_clear = regwrt_params->skip_pipeline_clear ? 1 : 0;
+ switch (regwrt_params->pipeline_clear_options) {
+ case IPAHAL_HPS_CLEAR:
+ data->pipeline_clear_options = 0;
+ break;
+ case IPAHAL_SRC_GRP_CLEAR:
+ data->pipeline_clear_options = 1;
+ break;
+ case IPAHAL_FULL_PIPELINE_CLEAR:
+ data->pipeline_clear_options = 2;
+ break;
+ default:
+		IPAHAL_ERR("unsupported pipeline clear option %d\n",
+			regwrt_params->pipeline_clear_options);
+		WARN_ON(1);
+	}
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_packet_init *data;
+ struct ipahal_imm_cmd_ip_packet_init *pktinit_params =
+ (struct ipahal_imm_cmd_ip_packet_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;
+
+ if (unlikely(pktinit_params->destination_pipe_index & ~0x1F)) {
+ IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n",
+ pktinit_params->destination_pipe_index);
+ WARN_ON(1);
+ }
+ data->destination_pipe_index = pktinit_params->destination_pipe_index;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_nat_dma *data;
+ struct ipahal_imm_cmd_nat_dma *nat_params =
+ (struct ipahal_imm_cmd_nat_dma *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;
+
+ data->table_index = nat_params->table_index;
+ data->base_addr = nat_params->base_addr;
+ data->offset = nat_params->offset;
+ data->data = nat_params->data;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_hdr_init_system *data;
+ struct ipahal_imm_cmd_hdr_init_system *syshdr_params =
+ (struct ipahal_imm_cmd_hdr_init_system *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;
+
+ data->hdr_table_addr = syshdr_params->hdr_table_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_hdr_init_local *data;
+ struct ipahal_imm_cmd_hdr_init_local *lclhdr_params =
+ (struct ipahal_imm_cmd_hdr_init_local *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;
+
+ if (unlikely(lclhdr_params->size_hdr_table & ~0xFFF)) {
+ IPAHAL_ERR("Hdr tble size is bigger than 12bit width 0x%x\n",
+ lclhdr_params->size_hdr_table);
+ WARN_ON(1);
+ }
+ data->hdr_table_addr = lclhdr_params->hdr_table_addr;
+ data->size_hdr_table = lclhdr_params->size_hdr_table;
+ data->hdr_addr = lclhdr_params->hdr_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v6_routing_init *data;
+ struct ipahal_imm_cmd_ip_v6_routing_init *rt6_params =
+ (struct ipahal_imm_cmd_ip_v6_routing_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;
+
+ data->hash_rules_addr = rt6_params->hash_rules_addr;
+ data->hash_rules_size = rt6_params->hash_rules_size;
+ data->hash_local_addr = rt6_params->hash_local_addr;
+ data->nhash_rules_addr = rt6_params->nhash_rules_addr;
+ data->nhash_rules_size = rt6_params->nhash_rules_size;
+ data->nhash_local_addr = rt6_params->nhash_local_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v4_routing_init *data;
+ struct ipahal_imm_cmd_ip_v4_routing_init *rt4_params =
+ (struct ipahal_imm_cmd_ip_v4_routing_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;
+
+ data->hash_rules_addr = rt4_params->hash_rules_addr;
+ data->hash_rules_size = rt4_params->hash_rules_size;
+ data->hash_local_addr = rt4_params->hash_local_addr;
+ data->nhash_rules_addr = rt4_params->nhash_rules_addr;
+ data->nhash_rules_size = rt4_params->nhash_rules_size;
+ data->nhash_local_addr = rt4_params->nhash_local_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v4_nat_init *data;
+ struct ipahal_imm_cmd_ip_v4_nat_init *nat4_params =
+ (struct ipahal_imm_cmd_ip_v4_nat_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;
+
+ data->ipv4_rules_addr = nat4_params->ipv4_rules_addr;
+ data->ipv4_expansion_rules_addr =
+ nat4_params->ipv4_expansion_rules_addr;
+ data->index_table_addr = nat4_params->index_table_addr;
+ data->index_table_expansion_addr =
+ nat4_params->index_table_expansion_addr;
+ data->table_index = nat4_params->table_index;
+ data->ipv4_rules_addr_type =
+ nat4_params->ipv4_rules_addr_shared ? 1 : 0;
+ data->ipv4_expansion_rules_addr_type =
+ nat4_params->ipv4_expansion_rules_addr_shared ? 1 : 0;
+ data->index_table_addr_type =
+ nat4_params->index_table_addr_shared ? 1 : 0;
+ data->index_table_expansion_addr_type =
+ nat4_params->index_table_expansion_addr_shared ? 1 : 0;
+ data->size_base_tables = nat4_params->size_base_tables;
+ data->size_expansion_tables = nat4_params->size_expansion_tables;
+ data->public_ip_addr = nat4_params->public_ip_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v6_filter_init *data;
+ struct ipahal_imm_cmd_ip_v6_filter_init *flt6_params =
+ (struct ipahal_imm_cmd_ip_v6_filter_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;
+
+ data->hash_rules_addr = flt6_params->hash_rules_addr;
+ data->hash_rules_size = flt6_params->hash_rules_size;
+ data->hash_local_addr = flt6_params->hash_local_addr;
+ data->nhash_rules_addr = flt6_params->nhash_rules_addr;
+ data->nhash_rules_size = flt6_params->nhash_rules_size;
+ data->nhash_local_addr = flt6_params->nhash_local_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v4_filter_init *data;
+ struct ipahal_imm_cmd_ip_v4_filter_init *flt4_params =
+ (struct ipahal_imm_cmd_ip_v4_filter_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;
+
+ data->hash_rules_addr = flt4_params->hash_rules_addr;
+ data->hash_rules_size = flt4_params->hash_rules_size;
+ data->hash_local_addr = flt4_params->hash_local_addr;
+ data->nhash_rules_addr = flt4_params->nhash_rules_addr;
+ data->nhash_rules_size = flt4_params->nhash_rules_size;
+ data->nhash_local_addr = flt4_params->nhash_local_addr;
+
+ return pyld;
+}
+
+/*
+ * struct ipahal_imm_cmd_obj - immediate command H/W information for
+ * specific IPA version
+ * @construct - CB to construct imm command payload from abstracted structure
+ * @opcode - Immediate command OpCode
+ * @dyn_op - Does this command support a dynamic opcode?
+ *  Some command opcodes are dynamic, where part of the opcode is
+ *  supplied as a parameter. This flag indicates whether the specific
+ *  command supports it or not.
+ */
+struct ipahal_imm_cmd_obj {
+ struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
+ const void *params, bool is_atomic_ctx);
+ u16 opcode;
+ bool dyn_op;
+};
+
+/*
+ * This table contains the info regarding each immediate command for IPAv3
+ * and later.
+ * Information like: opcode and construct functions.
+ * All the information on the IMM commands on IPAv3 is statically defined
+ * below. If information is missing regarding some IMM command on some IPA
+ * version, the init function will fill it with the information from the
+ * previous IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * If the opcode is -1, the IMM command is removed on that
+ * specific version.
+ */
+static struct ipahal_imm_cmd_obj
+ ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = {
+ /* IPAv3 */
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
+ ipa_imm_cmd_construct_ip_v4_filter_init,
+ 3, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
+ ipa_imm_cmd_construct_ip_v6_filter_init,
+ 4, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
+ ipa_imm_cmd_construct_ip_v4_nat_init,
+ 5, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
+ ipa_imm_cmd_construct_ip_v4_routing_init,
+ 7, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
+ ipa_imm_cmd_construct_ip_v6_routing_init,
+ 8, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
+ ipa_imm_cmd_construct_hdr_init_local,
+ 9, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
+ ipa_imm_cmd_construct_hdr_init_system,
+ 10, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
+ ipa_imm_cmd_construct_register_write,
+ 12, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
+ ipa_imm_cmd_construct_nat_dma,
+ 14, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
+ ipa_imm_cmd_construct_ip_packet_init,
+ 16, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
+ ipa_imm_cmd_construct_dma_task_32b_addr,
+ 17, true},
+ [IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
+ ipa_imm_cmd_construct_dma_shared_mem,
+ 19, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
+ ipa_imm_cmd_construct_ip_packet_tag_status,
+ 20, false},
+};
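+/*
+ * Illustrative sketch (not driver code): if an entry were added above for
+ * a hypothetical later version, e.g.
+ *
+ *	[IPA_HW_v3_1][IPA_IMM_CMD_REGISTER_WRITE] = {
+ *		ipa_imm_cmd_construct_register_write,
+ *		12, false},
+ *
+ * then ipahal_imm_cmd_init() below would keep that entry as an explicit
+ * override and copy every remaining all-zero IPAv3.1 entry from the
+ * IPAv3.0 row.
+ */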
+
+/*
+ * ipahal_imm_cmd_init() - Build the Immediate command information table
+ * See ipahal_imm_cmd_objs[][] comments
+ */
+static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type)
+{
+ int i;
+ int j;
+ struct ipahal_imm_cmd_obj zero_obj;
+
+ IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+ IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+ return -EINVAL;
+ }
+
+ memset(&zero_obj, 0, sizeof(zero_obj));
+ for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+ for (j = 0; j < IPA_IMM_CMD_MAX ; j++) {
+ if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj,
+ sizeof(struct ipahal_imm_cmd_obj))) {
+ memcpy(&ipahal_imm_cmd_objs[i+1][j],
+ &ipahal_imm_cmd_objs[i][j],
+ sizeof(struct ipahal_imm_cmd_obj));
+ } else {
+ /*
+ * explicitly overridden immediate command.
+ * Check validity
+ */
+ if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
+ IPAHAL_ERR(
+ "imm_cmd=%s with zero opcode ipa_ver=%d\n",
+ ipahal_imm_cmd_name_str(j), i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_imm_cmd_objs[i+1][j].construct) {
+ IPAHAL_ERR(
+ "imm_cmd=%s with NULL construct func ipa_ver=%d\n",
+ ipahal_imm_cmd_name_str(j), i+1);
+ WARN_ON(1);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * ipahal_imm_cmd_name_str() - returns the string that represents the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
+{
+ if (cmd_name < 0 || cmd_name >= IPA_IMM_CMD_MAX) {
+ IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
+ return "Invalid IMM_CMD";
+ }
+
+ return ipahal_imm_cmd_name_to_str[cmd_name];
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
+{
+ u32 opcode;
+
+ if (cmd >= IPA_IMM_CMD_MAX) {
+ IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
+ ipa_assert();
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+ ipahal_imm_cmd_name_str(cmd));
+ opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+ if (opcode == -1) {
+ IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+ ipahal_imm_cmd_name_str(cmd));
+ ipa_assert();
+ return -EFAULT;
+ }
+
+ return opcode;
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ * that supports dynamic opcode
+ * Some command opcodes are not totally fixed; part of the opcode is
+ * a supplied parameter. E.g. the Low-Byte is fixed and the Hi-Byte
+ * is a given parameter.
+ * This API will return the composed opcode of the command given
+ * the parameter.
+ * Note: Use this API only for immediate commands that support Dynamic Opcode
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param)
+{
+ u32 opcode;
+
+ if (cmd >= IPA_IMM_CMD_MAX) {
+ IPAHAL_ERR("Invalid immediate command IMM_CMD=%u\n", cmd);
+ ipa_assert();
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+ ipahal_imm_cmd_name_str(cmd));
+
+ if (!ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].dyn_op) {
+ IPAHAL_ERR("IMM_CMD=%s does not support dynamic opcode\n",
+ ipahal_imm_cmd_name_str(cmd));
+ ipa_assert();
+ return -EFAULT;
+ }
+
+	/* Currently, dynamic opcode commands use a param that is set
+	 * on the Opcode hi-byte (the lo-byte is fixed).
+	 * If this is to be changed in the future, make the opcode calculation
+	 * a CB per command.
+	 */
+ if (param & ~0xFFFF) {
+ IPAHAL_ERR("IMM_CMD=%s opcode param is invalid\n",
+ ipahal_imm_cmd_name_str(cmd));
+ ipa_assert();
+ return -EFAULT;
+ }
+ opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+ if (opcode == -1) {
+ IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+ ipahal_imm_cmd_name_str(cmd));
+ ipa_assert();
+ return -EFAULT;
+ }
+ if (opcode & ~0xFFFF) {
+ IPAHAL_ERR("IMM_CMD=%s opcode will be overridden\n",
+ ipahal_imm_cmd_name_str(cmd));
+ ipa_assert();
+ return -EFAULT;
+ }
+ return (opcode + (param<<8));
+}
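+/*
+ * Illustrative sketch (not driver code): for a dynamic opcode command such
+ * as DMA_TASK_32B_ADDR (fixed opcode 17 on IPAv3.0 per the table above), a
+ * param of 2 (e.g. the number of buffers) composes as
+ *
+ *	17 + (2 << 8) = 0x0211
+ *
+ * i.e. the fixed opcode in the lo-byte and the param in the hi-byte.
+ */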
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds an imm cmd payload that can be sent to IPA.
+ * The command will be allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ if (!params) {
+ IPAHAL_ERR("Input error: params=%p\n", params);
+ ipa_assert();
+ return NULL;
+ }
+
+ if (cmd >= IPA_IMM_CMD_MAX) {
+ IPAHAL_ERR("Invalid immediate command %u\n", cmd);
+ ipa_assert();
+ return NULL;
+ }
+
+ IPAHAL_DBG_LOW("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));
+ return ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct(
+ cmd, params, is_atomic_ctx);
+}
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
+ * The core driver may want functionality to inject NOP commands to IPA,
+ * e.g. to ensure a pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ * ipahal_construct_imm_cmd(). This function is a helper for the core driver
+ * to reach this NOP functionality easily.
+ * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+ bool skip_pipline_clear,
+ enum ipahal_pipeline_clear_option pipline_clr_opt,
+ bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_register_write cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.skip_pipeline_clear = skip_pipline_clear;
+ cmd.pipeline_clear_options = pipline_clr_opt;
+ cmd.value_mask = 0x0;
+
+ cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+ &cmd, is_atomic_ctx);
+
+ if (!cmd_pyld)
+ IPAHAL_ERR("failed to construct register_write imm cmd\n");
+
+ return cmd_pyld;
+}
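+/*
+ * Illustrative usage sketch (hypothetical caller, not driver code):
+ *
+ *	struct ipahal_imm_cmd_pyld *nop_pyld;
+ *
+ *	nop_pyld = ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR,
+ *		false);
+ *	if (!nop_pyld)
+ *		return -ENOMEM;
+ *	... queue nop_pyld->data (nop_pyld->len bytes) on the cmd pipe ...
+ *	ipahal_destroy_imm_cmd(nop_pyld);
+ */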
+
+
+/* IPA Packet Status Logic */
+
+#define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \
+ (status->status_mask |= \
+ ((hw_status->status_mask & (__hw_bit_msk) ? 1 : 0) << (__shft)))
+
+static void ipa_pkt_status_parse(
+ const void *unparsed_status, struct ipahal_pkt_status *status)
+{
+ enum ipahal_pkt_status_opcode opcode = 0;
+ enum ipahal_pkt_status_exception exception_type = 0;
+
+ struct ipa_pkt_status_hw *hw_status =
+ (struct ipa_pkt_status_hw *)unparsed_status;
+
+ status->pkt_len = hw_status->pkt_len;
+ status->endp_src_idx = hw_status->endp_src_idx;
+ status->endp_dest_idx = hw_status->endp_dest_idx;
+ status->metadata = hw_status->metadata;
+ status->flt_local = hw_status->flt_local;
+ status->flt_hash = hw_status->flt_hash;
+	status->flt_global = hw_status->flt_global;
+ status->flt_ret_hdr = hw_status->flt_ret_hdr;
+ status->flt_miss = ~(hw_status->flt_rule_id) ? false : true;
+ status->flt_rule_id = hw_status->flt_rule_id;
+ status->rt_local = hw_status->rt_local;
+ status->rt_hash = hw_status->rt_hash;
+ status->ucp = hw_status->ucp;
+ status->rt_tbl_idx = hw_status->rt_tbl_idx;
+ status->rt_miss = ~(hw_status->rt_rule_id) ? false : true;
+ status->rt_rule_id = hw_status->rt_rule_id;
+ status->nat_hit = hw_status->nat_hit;
+ status->nat_entry_idx = hw_status->nat_entry_idx;
+ status->tag_info = hw_status->tag_info;
+ status->seq_num = hw_status->seq_num;
+ status->time_of_day_ctr = hw_status->time_of_day_ctr;
+ status->hdr_local = hw_status->hdr_local;
+ status->hdr_offset = hw_status->hdr_offset;
+ status->frag_hit = hw_status->frag_hit;
+ status->frag_rule = hw_status->frag_rule;
+
+ switch (hw_status->status_opcode) {
+ case 0x1:
+ opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET;
+ break;
+ case 0x2:
+ opcode = IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE;
+ break;
+ case 0x4:
+ opcode = IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET;
+ break;
+ case 0x8:
+ opcode = IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET;
+ break;
+ case 0x10:
+ opcode = IPAHAL_PKT_STATUS_OPCODE_LOG;
+ break;
+ case 0x20:
+ opcode = IPAHAL_PKT_STATUS_OPCODE_DCMP;
+ break;
+ case 0x40:
+ opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS;
+ break;
+ default:
+ IPAHAL_ERR("unsupported Status Opcode 0x%x\n",
+ hw_status->status_opcode);
+ WARN_ON(1);
+	}
+ status->status_opcode = opcode;
+
+ switch (hw_status->nat_type) {
+ case 0:
+ status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE;
+ break;
+ case 1:
+ status->nat_type = IPAHAL_PKT_STATUS_NAT_SRC;
+ break;
+ case 2:
+ status->nat_type = IPAHAL_PKT_STATUS_NAT_DST;
+ break;
+ default:
+ IPAHAL_ERR("unsupported Status NAT type 0x%x\n",
+ hw_status->nat_type);
+ WARN_ON(1);
+	}
+
+ switch (hw_status->exception) {
+ case 0:
+ exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE;
+ break;
+ case 1:
+ exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR;
+ break;
+ case 4:
+ exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE;
+ break;
+ case 8:
+ exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH;
+ break;
+ case 16:
+ exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS;
+ break;
+ case 32:
+ exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
+ break;
+ case 64:
+ exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
+ break;
+ default:
+ IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
+ hw_status->exception);
+ WARN_ON(1);
+	}
+ status->exception = exception_type;
+
+ IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x4, IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x8, IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x10, IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x20, IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x40,
+ IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x80, IPAHAL_PKT_STATUS_MASK_V4_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x100,
+ IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x200, IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x400, IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x800,
+ IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x1000, IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x2000, IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x4000, IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT);
+ IPA_PKT_STATUS_SET_MSK(0x8000, IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT);
+ status->status_mask &= 0xFFFF;
+}
+
+/*
+ * struct ipahal_pkt_status_obj - Packet Status H/W information for
+ * specific IPA version
+ * @size: H/W size of the status packet
+ * @parse: CB that parses the H/W packet status into the abstracted structure
+ */
+struct ipahal_pkt_status_obj {
+ u32 size;
+ void (*parse)(const void *unparsed_status,
+ struct ipahal_pkt_status *status);
+};
+
+/*
+ * This table contains the info regarding packet status for IPAv3 and later.
+ * Information like: size of packet status and parsing function.
+ * All the information on the pkt Status on IPAv3 is statically defined below.
+ * If information is missing regarding some IPA version, the init function
+ * will fill it with the information from the previous IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = {
+ /* IPAv3 */
+ [IPA_HW_v3_0] = {
+ IPA3_0_PKT_STATUS_SIZE,
+ ipa_pkt_status_parse,
+ },
+};
+
+/*
+ * ipahal_pkt_status_init() - Build the packet status information array
+ * for the different IPA versions
+ * See ipahal_pkt_status_objs[] comments
+ */
+static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
+{
+ int i;
+ struct ipahal_pkt_status_obj zero_obj;
+
+ IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+ IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+ return -EINVAL;
+ }
+
+ /*
+	 * Since structure alignment is implementation dependent,
+	 * add a test to avoid different and incompatible data layouts.
+	 *
+	 * In case new H/W has a different size or structure of status packet,
+	 * add a compile time validity check for it like below (as well as
+	 * the new defines and/or the new structure in the internal header).
+ */
+ BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) !=
+ IPA3_0_PKT_STATUS_SIZE);
+
+ memset(&zero_obj, 0, sizeof(zero_obj));
+ for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+ if (!memcmp(&ipahal_pkt_status_objs[i+1], &zero_obj,
+ sizeof(struct ipahal_pkt_status_obj))) {
+ memcpy(&ipahal_pkt_status_objs[i+1],
+ &ipahal_pkt_status_objs[i],
+ sizeof(struct ipahal_pkt_status_obj));
+ } else {
+ /*
+ * explicitly overridden Packet Status info
+ * Check validity
+ */
+ if (!ipahal_pkt_status_objs[i+1].size) {
+ IPAHAL_ERR(
+ "Packet Status with zero size ipa_ver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_pkt_status_objs[i+1].parse) {
+ IPAHAL_ERR(
+ "Packet Status without Parse func ipa_ver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void)
+{
+ return ipahal_pkt_status_objs[ipahal_ctx->hw_type].size;
+}
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+ struct ipahal_pkt_status *status)
+{
+ if (!unparsed_status || !status) {
+ IPAHAL_ERR("Input Error: unparsed_status=%p status=%p\n",
+ unparsed_status, status);
+ return;
+ }
+
+ IPAHAL_DBG_LOW("Parse Status Packet\n");
+ memset(status, 0, sizeof(*status));
+ ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse(unparsed_status,
+ status);
+}
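+/*
+ * Illustrative usage sketch (hypothetical caller, not driver code): walking
+ * status entries packed back to back in an rx buffer of rx_len bytes:
+ *
+ *	struct ipahal_pkt_status status;
+ *	u32 sz = ipahal_pkt_status_get_size();
+ *	void *cur = rx_buf;
+ *
+ *	while (cur + sz <= rx_buf + rx_len) {
+ *		ipahal_pkt_status_parse(cur, &status);
+ *		... act on status.status_opcode, status.exception, etc. ...
+ *		cur += sz;
+ *	}
+ */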
+
+/*
+ * ipahal_pkt_status_exception_str() - returns the string that represents
+ * the exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+ enum ipahal_pkt_status_exception exception)
+{
+ if (exception < 0 || exception >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) {
+ IPAHAL_ERR(
+ "requested string of invalid pkt_status exception=%d\n",
+ exception);
+ return "Invalid PKT_STATUS_EXCEPTION";
+ }
+
+ return ipahal_pkt_status_exception_to_str[exception];
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void ipahal_debugfs_init(void)
+{
+	ipahal_ctx->dent = debugfs_create_dir("ipahal", NULL);
+ if (!ipahal_ctx->dent || IS_ERR(ipahal_ctx->dent)) {
+ IPAHAL_ERR("fail to create ipahal debugfs folder\n");
+ goto fail;
+ }
+
+ return;
+fail:
+ debugfs_remove_recursive(ipahal_ctx->dent);
+ ipahal_ctx->dent = NULL;
+}
+
+static void ipahal_debugfs_remove(void)
+{
+ if (!ipahal_ctx)
+ return;
+
+ if (IS_ERR(ipahal_ctx->dent)) {
+ IPAHAL_ERR("ipahal debugfs folder was not created\n");
+ return;
+ }
+
+ debugfs_remove_recursive(ipahal_ctx->dent);
+}
+#else /* CONFIG_DEBUG_FS */
+static void ipahal_debugfs_init(void) {}
+static void ipahal_debugfs_remove(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * ipahal_cp_hdr_to_hw_buff_v3() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
+ u8 *const hdr, u32 hdr_len)
+{
+ memcpy(base + offset, hdr, hdr_len);
+}
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to hardware
+ * buffer according to base address and offset given.
+ * @type: header processing context type (no processing context,
+ * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
+ void *const base, u32 offset,
+ u32 hdr_len, bool is_hdr_proc_ctx,
+ dma_addr_t phys_base, u32 hdr_base_addr,
+		struct ipa_hdr_offset_entry *offset_entry)
+{
+ if (type == IPA_HDR_PROC_NONE) {
+ struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
+
+ ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
+ (base + offset);
+ ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+ ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.value = hdr_len;
+ ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+ hdr_base_addr + offset_entry->offset;
+ IPAHAL_DBG("header address 0x%x\n",
+ ctx->hdr_add.hdr_addr);
+ ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+ ctx->end.length = 0;
+ ctx->end.value = 0;
+ } else {
+ struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
+
+ ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
+ (base + offset);
+ ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+ ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.value = hdr_len;
+ ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+ hdr_base_addr + offset_entry->offset;
+ IPAHAL_DBG("header address 0x%x\n",
+ ctx->hdr_add.hdr_addr);
+ ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+ ctx->cmd.length = 0;
+ switch (type) {
+ case IPA_HDR_PROC_ETHII_TO_ETHII:
+ ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;
+ break;
+ case IPA_HDR_PROC_ETHII_TO_802_3:
+ ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3;
+ break;
+ case IPA_HDR_PROC_802_3_TO_ETHII:
+ ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
+ break;
+ case IPA_HDR_PROC_802_3_TO_802_3:
+ ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
+ break;
+ default:
+			IPAHAL_ERR("unknown ipa_hdr_proc_type %d\n", type);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ IPAHAL_DBG("command id %d\n", ctx->cmd.value);
+ ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+ ctx->end.length = 0;
+ ctx->end.value = 0;
+ }
+
+ return 0;
+}
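+/*
+ * Resulting buffer layout sketch (derived from the code above), starting at
+ * base + offset, for the non-IPA_HDR_PROC_NONE case:
+ *
+ *	[HDR_ADD TLV: type, length=1, value=hdr_len][hdr_addr]
+ *	[PROC_CMD TLV: type, length=0, value=ucp command id]
+ *	[END TLV: type, length=0, value=0]
+ *
+ * The IPA_HDR_PROC_NONE case is identical, just without the PROC_CMD TLV.
+ */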
+
+/*
+ * ipahal_get_proc_ctx_needed_len_v3() - calculates the needed length for
+ * addition of header processing context according to the type of processing
+ * context.
+ * @type: header processing context type (no processing context,
+ * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type)
+{
+ return (type == IPA_HDR_PROC_NONE) ?
+ sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) :
+ sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
+}
+
+/*
+ * struct ipahal_hdr_funcs - headers handling functions for specific IPA
+ * version
+ * @ipahal_cp_hdr_to_hw_buff - copy function for regular headers
+ */
+struct ipahal_hdr_funcs {
+ void (*ipahal_cp_hdr_to_hw_buff)(void *const base, u32 offset,
+ u8 *const hdr, u32 hdr_len);
+
+ int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
+ void *const base, u32 offset, u32 hdr_len,
+ bool is_hdr_proc_ctx, dma_addr_t phys_base,
+ u32 hdr_base_addr,
+ struct ipa_hdr_offset_entry *offset_entry);
+
+ int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
+};
+
+static struct ipahal_hdr_funcs hdr_funcs;
+
+static void ipahal_hdr_init(enum ipa_hw_type ipa_hw_type)
+{
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ /*
+	 * Once there are changes in HW that require a different case, insert
+	 * a new case for the new h/w. Always put the default on the latest HW
+	 * and make sure all previously supported versions have their own cases.
+ */
+ switch (ipa_hw_type) {
+ case IPA_HW_v3_0:
+ default:
+ hdr_funcs.ipahal_cp_hdr_to_hw_buff =
+ ipahal_cp_hdr_to_hw_buff_v3;
+ hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff =
+ ipahal_cp_proc_ctx_to_hw_buff_v3;
+ hdr_funcs.ipahal_get_proc_ctx_needed_len =
+ ipahal_get_proc_ctx_needed_len_v3;
+ }
+ IPAHAL_DBG("Exit\n");
+}
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr,
+ u32 hdr_len)
+{
+ IPAHAL_DBG_LOW("Entry\n");
+ IPAHAL_DBG("base %p, offset %d, hdr %p, hdr_len %d\n", base,
+ offset, hdr, hdr_len);
+ if (!base || !hdr_len || !hdr) {
+		IPAHAL_ERR("failed to validate params\n");
+ return;
+ }
+
+ hdr_funcs.ipahal_cp_hdr_to_hw_buff(base, offset, hdr, hdr_len);
+
+ IPAHAL_DBG_LOW("Exit\n");
+}
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to hardware
+ * buffer according to base address and offset given.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+ void *const base, u32 offset, u32 hdr_len,
+ bool is_hdr_proc_ctx, dma_addr_t phys_base,
+ u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry)
+{
+ IPAHAL_DBG(
+ "type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n"
+ , type, base, offset, hdr_len, is_hdr_proc_ctx,
+ hdr_base_addr, offset_entry);
+
+ if (!base ||
+ !hdr_len ||
+ (!phys_base && !hdr_base_addr) ||
+ !hdr_base_addr ||
+		(!is_hdr_proc_ctx && !offset_entry)) {
+ IPAHAL_ERR(
+ "invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
+ , hdr_len, &phys_base, hdr_base_addr
+ , is_hdr_proc_ctx, offset_entry);
+ return -EINVAL;
+ }
+
+ return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
+ hdr_len, is_hdr_proc_ctx, phys_base,
+ hdr_base_addr, offset_entry);
+}
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for
+ * addition of header processing context according to the type of processing
+ * context
+ * @type: header processing context type (no processing context,
+ * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type)
+{
+ int res;
+
+ IPAHAL_DBG("entry\n");
+
+ res = hdr_funcs.ipahal_get_proc_ctx_needed_len(type);
+
+ IPAHAL_DBG("Exit\n");
+
+ return res;
+}
+
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+ struct device *ipa_pdev)
+{
+ int result;
+
+ IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%p ipa_pdev=%p\n",
+ ipa_hw_type, base, ipa_pdev);
+
+ ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
+ if (!ipahal_ctx) {
+ IPAHAL_ERR("kzalloc err for ipahal_ctx\n");
+ result = -ENOMEM;
+ goto bail_err_exit;
+ }
+
+ if (ipa_hw_type < IPA_HW_v3_0) {
+ IPAHAL_ERR("ipahal supported on IPAv3 and later only\n");
+ result = -EINVAL;
+ goto bail_free_ctx;
+ }
+
+ if (ipa_hw_type >= IPA_HW_MAX) {
+ IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+ result = -EINVAL;
+ goto bail_free_ctx;
+ }
+
+ if (!base) {
+ IPAHAL_ERR("invalid memory io mapping addr\n");
+ result = -EINVAL;
+ goto bail_free_ctx;
+ }
+
+ if (!ipa_pdev) {
+ IPAHAL_ERR("invalid IPA platform device\n");
+ result = -EINVAL;
+ goto bail_free_ctx;
+ }
+
+ ipahal_ctx->hw_type = ipa_hw_type;
+ ipahal_ctx->base = base;
+ ipahal_ctx->ipa_pdev = ipa_pdev;
+
+ if (ipahal_reg_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal reg\n");
+ result = -EFAULT;
+ goto bail_free_ctx;
+ }
+
+ if (ipahal_imm_cmd_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal imm cmd\n");
+ result = -EFAULT;
+ goto bail_free_ctx;
+ }
+
+ if (ipahal_pkt_status_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal pkt status\n");
+ result = -EFAULT;
+ goto bail_free_ctx;
+ }
+
+ ipahal_hdr_init(ipa_hw_type);
+
+ if (ipahal_fltrt_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal flt rt\n");
+ result = -EFAULT;
+ goto bail_free_ctx;
+ }
+
+ ipahal_debugfs_init();
+
+ return 0;
+
+bail_free_ctx:
+ kfree(ipahal_ctx);
+ ipahal_ctx = NULL;
+bail_err_exit:
+ return result;
+}
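+/*
+ * Illustrative init/teardown sketch (hypothetical probe path, not driver
+ * code): ipahal_init() is expected to be called once, before any other
+ * ipahal API, and paired with ipahal_destroy() on teardown:
+ *
+ *	if (ipahal_init(ipa_hw_type, ipa_reg_base, &pdev->dev))
+ *		goto fail_hal;
+ *	... use ipahal APIs ...
+ *	ipahal_destroy();
+ */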
+
+void ipahal_destroy(void)
+{
+ IPAHAL_DBG("Entry\n");
+ ipahal_fltrt_destroy();
+ ipahal_debugfs_remove();
+ kfree(ipahal_ctx);
+ ipahal_ctx = NULL;
+}
+
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem)
+{
+ if (likely(mem)) {
+ dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+ mem->phys_base);
+ mem->size = 0;
+ mem->base = NULL;
+ mem->phys_base = 0;
+ }
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
new file mode 100644
index 0000000..6549775
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -0,0 +1,642 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_H_
+#define _IPAHAL_H_
+
+#include <linux/msm_ipa.h>
+#include "../../ipa_common_i.h"
+
+/*
+ * Immediate command names
+ *
+ * NOTE: Any change to this enum needs a matching change to the
+ * ipahal_imm_cmd_name_to_str array as well.
+ */
+enum ipahal_imm_cmd_name {
+ IPA_IMM_CMD_IP_V4_FILTER_INIT,
+ IPA_IMM_CMD_IP_V6_FILTER_INIT,
+ IPA_IMM_CMD_IP_V4_NAT_INIT,
+ IPA_IMM_CMD_IP_V4_ROUTING_INIT,
+ IPA_IMM_CMD_IP_V6_ROUTING_INIT,
+ IPA_IMM_CMD_HDR_INIT_LOCAL,
+ IPA_IMM_CMD_HDR_INIT_SYSTEM,
+ IPA_IMM_CMD_REGISTER_WRITE,
+ IPA_IMM_CMD_NAT_DMA,
+ IPA_IMM_CMD_IP_PACKET_INIT,
+ IPA_IMM_CMD_DMA_SHARED_MEM,
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
+ IPA_IMM_CMD_DMA_TASK_32B_ADDR,
+ IPA_IMM_CMD_MAX,
+};
+
+/* Immediate commands abstracted structures */
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_filter_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_filter_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions,
+ * location, cache address and other related parameters.
+ * @table_index: For future support of multiple NAT tables
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_rules_addr_shared: ipv4_rules_addr in shared mem (if not, then sys)
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where the expansion NAT
+ * table starts. IPv4 NAT rules that result in NAT collision are located
+ * in this table.
+ * @ipv4_expansion_rules_addr_shared: ipv4_expansion_rules_addr in
+ * shared mem (if not, then sys)
+ * @index_table_addr: Addr in sys/shared mem where the index table, which
+ *  points to the NAT table, starts
+ * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ * table starts
+ * @index_table_expansion_addr_shared: index_table_expansion_addr in
+ * shared mem (if not, then sys)
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ * idx tbl (each)
+ * @public_ip_addr: public IP address
+ */
+struct ipahal_imm_cmd_ip_v4_nat_init {
+ u8 table_index;
+ u64 ipv4_rules_addr;
+ bool ipv4_rules_addr_shared;
+ u64 ipv4_expansion_rules_addr;
+ bool ipv4_expansion_rules_addr_shared;
+ u64 index_table_addr;
+ bool index_table_addr_shared;
+ u64 index_table_expansion_addr;
+ bool index_table_expansion_addr_shared;
+ u16 size_base_tables;
+ u16 size_expansion_tables;
+ u32 public_ip_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_routing_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_routing_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ */
+struct ipahal_imm_cmd_hdr_init_local {
+ u64 hdr_table_addr;
+ u32 size_hdr_table;
+ u32 hdr_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipahal_imm_cmd_hdr_init_system {
+ u64 hdr_table_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_nat_dma - NAT_DMA cmd payload
+ * Perform DMA operation on NAT related mem addressess. Copy data into
+ * different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ */
+struct ipahal_imm_cmd_nat_dma {
+ u8 table_index;
+ u8 base_addr;
+ u32 offset;
+ u16 data;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload
+ * Configuration for a specific IP pkt. Shall be sent prior to an IP pkt's
+ * data. The pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ * is enabled, this field will overwrite the rt rule)
+ */
+struct ipahal_imm_cmd_ip_packet_init {
+ u32 destination_pipe_index;
+};
+
+/*
+ * enum ipahal_pipeline_clear_option - Values for pipeline clear waiting
+ * options
+ * @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
+ * shall not be serviced until HPS is clear of packets or immediate commands.
+ * The high priority Rx queue / Q6ZIP group shall still be serviced normally.
+ *
+ * @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear
+ * (for no packet contexts allocated to the originating source group).
+ * The source group / Rx queue shall not be serviced until all previously
+ * allocated packet contexts are released. All other source groups/queues shall
+ * be serviced normally.
+ *
+ * @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
+ * All groups / Rx queues shall not be serviced until IPA pipeline is fully
+ * clear. This should be used for debug only.
+ */
+enum ipahal_pipeline_clear_option {
+ IPAHAL_HPS_CLEAR,
+ IPAHAL_SRC_GRP_CLEAR,
+ IPAHAL_FULL_PIPELINE_CLEAR
+};
+
+/*
+ * struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload
+ * Write value to register. Allows reg changes to be synced with data packet
+ * and other immediate commands. Can be used to access the sram
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @skip_pipeline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ */
+struct ipahal_imm_cmd_register_write {
+ u32 offset;
+ u32 value;
+ u32 value_mask;
+ bool skip_pipeline_clear;
+ enum ipahal_pipeline_clear_option pipeline_clear_options;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @is_read: Read operation from local memory? If not, then write.
+ * @skip_pipeline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ * @system_addr: Address in system memory
+ */
+struct ipahal_imm_cmd_dma_shared_mem {
+ u32 size;
+ u32 local_addr;
+ bool is_read;
+ bool skip_pipeline_clear;
+ enum ipahal_pipeline_clear_option pipeline_clear_options;
+ u64 system_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ * value that is passed back to SW inside Packet Status information.
+ * TAG info will be provided as part of Packet Status info generated for
+ * the next pkt transferred over the pipe.
+ * This immediate command must be followed by a packet in the same transfer.
+ * @tag: Tag that is provided back to SW
+ */
+struct ipahal_imm_cmd_ip_packet_tag_status {
+ u64 tag;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ * multiple descriptors.
+ * The Opcode is dynamic: it holds the number of buffers to process
+ * @cmplt: Complete flag: If true, IPA interrupt SW when the entire
+ * DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: If true, IPA asserts the EOT to the
+ *  dest client. This is used for the aggr sequence
+ * @flsh: Flush flag: If true pkt will go through the IPA blocks but
+ * will not be xfered to dest client but rather will be discarded
+ * @lock: Lock pipe flag: If true, IPA will stop processing descriptors
+ * from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: If true, IPA will stop exclusively
+ * servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ *  only the first one needs to have this field set. It will be ignored
+ *  in subsequent DMA_TASKs until the packet ends (EOT). The first DMA_TASK
+ *  must contain this field (2 or more buffers) or EOT.
+ */
+struct ipahal_imm_cmd_dma_task_32b_addr {
+ bool cmplt;
+ bool eof;
+ bool flsh;
+ bool lock;
+ bool unlock;
+ u32 size1;
+ u32 addr1;
+ u32 packet_size;
+};
+
+/*
+ * struct ipahal_imm_cmd_pyld - Immediate cmd payload information
+ * @len: length of the buffer
+ * @data: buffer containing the immediate command payload. The buffer is
+ *  laid out back to back with this structure
+ */
+struct ipahal_imm_cmd_pyld {
+ u16 len;
+ u8 data[0];
+};
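+/*
+ * Memory layout note (a sketch of what the construct functions are expected
+ * to do): the header and the payload share one allocation, e.g.
+ * IPAHAL_MEM_ALLOC(sizeof(*pyld) + payload_size), so data[] starts right
+ * after len and pyld->data / pyld->len can be handed to a descriptor as-is.
+ */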
+
+
+/* Immediate command Function APIs */
+
+/*
+ * ipahal_imm_cmd_name_str() - returns the string that represents the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ * that supports dynamic opcode
+ * Some command opcodes are not totally fixed; part of the opcode is
+ * a supplied parameter. E.g. the Low-Byte is fixed and the Hi-Byte
+ * is a given parameter.
+ * This API will return the composed opcode of the command given
+ * the parameter.
+ * Note: Use this API only for immediate commands that support Dynamic Opcode
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param);
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds an imm cmd payload that can be sent to IPA.
+ * The command will be allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx);
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
+ * The core driver may want functionality to inject NOP commands to IPA,
+ * e.g. to ensure a pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ * ipahal_construct_imm_cmd(). This function is a helper for the core driver
+ * to reach this NOP functionality easily.
+ * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+ bool skip_pipline_clear,
+ enum ipahal_pipeline_clear_option pipline_clr_opt,
+ bool is_atomic_ctx);
+
+/*
+ * ipahal_destroy_imm_cmd() - Destroy/Release bulk that was built
+ * by the construction functions
+ */
+static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld)
+{
+ kfree(pyld);
+}
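+/*
+ * Illustrative lifecycle sketch (hypothetical caller, not driver code),
+ * writing len bytes from system memory at dma_addr to IPA local memory
+ * at sram_offset:
+ *
+ *	struct ipahal_imm_cmd_dma_shared_mem mem_cmd;
+ *	struct ipahal_imm_cmd_pyld *pyld;
+ *
+ *	memset(&mem_cmd, 0, sizeof(mem_cmd));
+ *	mem_cmd.is_read = false;
+ *	mem_cmd.skip_pipeline_clear = false;
+ *	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ *	mem_cmd.size = len;
+ *	mem_cmd.system_addr = dma_addr;
+ *	mem_cmd.local_addr = sram_offset;
+ *	pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_DMA_SHARED_MEM,
+ *		&mem_cmd, false);
+ *	if (!pyld)
+ *		return -ENOMEM;
+ *	... send pyld->data / pyld->len, then ...
+ *	ipahal_destroy_imm_cmd(pyld);
+ */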
+
+
+/* IPA Status packet Structures and Function APIs */
+
+/*
+ * enum ipahal_pkt_status_opcode - Packet Status Opcode
+ * @IPAHAL_STATUS_OPCODE_PACKET_2ND_PASS: Packet Status generated as part of
+ * IPA second processing pass for a packet (i.e. IPA XLAT processing for
+ * the translated packet).
+ */
+enum ipahal_pkt_status_opcode {
+ IPAHAL_PKT_STATUS_OPCODE_PACKET = 0,
+ IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE,
+ IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET,
+ IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET,
+ IPAHAL_PKT_STATUS_OPCODE_LOG,
+ IPAHAL_PKT_STATUS_OPCODE_DCMP,
+ IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS,
+};
+
+/*
+ * enum ipahal_pkt_status_exception - Packet Status exception type
+ * @IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH: formerly IHL exception.
+ *
+ * Note: IPTYPE, PACKET_LENGTH and PACKET_THRESHOLD exceptions mean that
+ * partial / no IP processing took place and corresponding Status Mask
+ * fields should be ignored. Flt and rt info is not valid.
+ *
+ * NOTE: Any change to this enum needs a matching change to the
+ * ipahal_pkt_status_exception_to_str array as well.
+ */
+enum ipahal_pkt_status_exception {
+ IPAHAL_PKT_STATUS_EXCEPTION_NONE = 0,
+ IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR,
+ IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE,
+ IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH,
+ IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD,
+ IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS,
+ IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT,
+ IPAHAL_PKT_STATUS_EXCEPTION_NAT,
+ IPAHAL_PKT_STATUS_EXCEPTION_MAX,
+};
+
+/*
+ * enum ipahal_pkt_status_mask - Packet Status bitmask shift values of
+ * the contained flags. This bitmask indicates flags on the properties of
+ * the packet as well as IPA processing it may had.
+ * @FRAG_PROCESS: Frag block processing flag: Was pkt processed by frag block?
+ * Also means the frag info is valid unless exception or first frag
+ * @FILT_PROCESS: Flt block processing flag: Was pkt processed by flt block?
+ * Also means that flt info is valid.
+ * @NAT_PROCESS: NAT block processing flag: Was pkt processed by NAT block?
+ * Also means that NAT info is valid, unless exception.
+ * @ROUTE_PROCESS: Rt block processing flag: Was pkt processed by rt block?
+ * Also means that rt info is valid, unless exception.
+ * @TAG_VALID: Flag specifying if TAG and TAG info valid?
+ * @FRAGMENT: Flag specifying if pkt is IP fragment.
+ * @FIRST_FRAGMENT: Flag specifying if pkt is first fragment. In this case, frag
+ * info is invalid
+ * @V4: Flag specifying pkt is IPv4 or IPv6
+ * @CKSUM_PROCESS: CSUM block processing flag: Was pkt processed by csum block?
+ * If so, csum trailer exists
+ * @AGGR_PROCESS: Aggr block processing flag: Was pkt processed by aggr block?
+ * @DEST_EOT: Flag specifying if EOT was asserted for the pkt on dest endp
+ * @DEAGGR_PROCESS: Deaggr block processing flag: Was pkt processed by deaggr
+ * block?
+ * @DEAGG_FIRST: Flag specifying if this is the first pkt in deaggr frame
+ * @SRC_EOT: Flag specifying if EOT asserted by src endp when sending the buffer
+ * @PREV_EOT: Flag specifying if EOT was sent just before the pkt as part of
+ * aggr hard-byte-limit
+ * @BYTE_LIMIT: Flag specifying if pkt is over a configured byte limit.
+ */
+enum ipahal_pkt_status_mask {
+ IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT = 0,
+ IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT,
+ IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_V4_SHFT,
+ IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT,
+ IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT,
+};
+
+/*
+ * Returns a boolean value representing a property of the packet.
+ * @__flag_shft: The shift value, within the status bitmask, of the flag of
+ *  the needed property. See enum ipahal_pkt_status_mask
+ * @__status: Pointer to the abstracted status structure
+ */
+#define IPAHAL_PKT_STATUS_MASK_FLAG_VAL(__flag_shft, __status) \
+ (((__status)->status_mask) & ((u32)0x1<<(__flag_shft)) ? true : false)
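+/*
+ * Illustrative usage sketch (hypothetical caller, not driver code):
+ *
+ *	if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+ *		IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status))
+ *		... the pkt was csum processed, so a csum trailer exists ...
+ */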
+
+/*
+ * enum ipahal_pkt_status_nat_type - Type of NAT
+ */
+enum ipahal_pkt_status_nat_type {
+ IPAHAL_PKT_STATUS_NAT_NONE,
+ IPAHAL_PKT_STATUS_NAT_SRC,
+ IPAHAL_PKT_STATUS_NAT_DST,
+};
+
+/*
+ * struct ipahal_pkt_status - IPA status packet abstracted payload.
+ * This structure describes the status packet fields for the
+ * following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ * IPA_STATUS_SUSPENDED_PACKET.
+ * Other status types have a different status packet structure.
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: The first exception that took place.
+ * In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask of flags describing properties of the packet and
+ *  the processing it may have passed in IPA. See enum ipahal_pkt_status_mask
+ * @pkt_len: Pkt pyld len including hdr and retained hdr if used. Does
+ * not include padding or checksum trailer len.
+ * @endp_src_idx: Source end point index.
+ * @endp_dest_idx: Destination end point index.
+ * Not valid in case of exception
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does the matching flt rule belong
+ *  to an flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
+ * @flt_global: Global filter rule flag: Does the matching flt rule belong to
+ *  the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ *  specify to retain the header?
+ * @flt_miss: Filtering miss flag: Was there a filtering rule miss?
+ *  In case of a miss, all flt info is to be ignored
+ * @flt_rule_id: The ID of the matching filter rule (if no miss).
+ * This info can be combined with endp_src_idx to locate the exact rule.
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ *  an rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag
+ * @rt_tbl_idx: Index of the rt tbl that contains the matching rule
+ * @rt_miss: Routing miss flag: Was there a routing rule miss?
+ * @rt_rule_id: The ID of the matching rt rule. (if no miss). This info
+ * can be combined with rt_tbl_idx to locate the exact rule.
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @nat_type: Defines the type of the NAT operation
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ *  taken from the table that resides in local memory? (If not, then system mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ */
+struct ipahal_pkt_status {
+ enum ipahal_pkt_status_opcode status_opcode;
+ enum ipahal_pkt_status_exception exception;
+ u32 status_mask;
+ u32 pkt_len;
+ u8 endp_src_idx;
+ u8 endp_dest_idx;
+ u32 metadata;
+ bool flt_local;
+ bool flt_hash;
+ bool flt_global;
+ bool flt_ret_hdr;
+ bool flt_miss;
+ u16 flt_rule_id;
+ bool rt_local;
+ bool rt_hash;
+ bool ucp;
+ u8 rt_tbl_idx;
+ bool rt_miss;
+ u16 rt_rule_id;
+ bool nat_hit;
+ u16 nat_entry_idx;
+ enum ipahal_pkt_status_nat_type nat_type;
+ u64 tag_info;
+ u8 seq_num;
+ u32 time_of_day_ctr;
+ bool hdr_local;
+ u16 hdr_offset;
+ bool frag_hit;
+ u8 frag_rule;
+};
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void);
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+ struct ipahal_pkt_status *status);
+
+/*
+ * ipahal_pkt_status_exception_str() - returns a string representing the
+ * exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+ enum ipahal_pkt_status_exception exception);
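+
+/*
+ * Illustrative usage sketch, not part of this patch: a caller that has
+ * read ipahal_pkt_status_get_size() bytes of raw status from H/W into
+ * 'hw_buf' (hypothetical name) could parse and inspect it as follows:
+ *
+ *	struct ipahal_pkt_status status;
+ *
+ *	ipahal_pkt_status_parse(hw_buf, &status);
+ *	IPAHAL_DBG("pkt len %u exception %s\n", status.pkt_len,
+ *		ipahal_pkt_status_exception_str(status.exception));
+ */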
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len);
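+
+/*
+ * Illustrative sketch, not part of this patch: populating one header
+ * entry in a DMA-able table buffer ('mem' and 'entry' are hypothetical
+ * caller-side names):
+ *
+ *	ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset,
+ *		entry->hdr, entry->hdr_len);
+ */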
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to hardware
+ * buffer according to the base address and offset given.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+ void *base, u32 offset, u32 hdr_len,
+ bool is_hdr_proc_ctx, dma_addr_t phys_base,
+ u32 hdr_base_addr,
+ struct ipa_hdr_offset_entry *offset_entry);
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
+ * of header processing context according to the type of processing context
+ * @type: header processing context type (no processing context,
+ * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type);
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+ struct device *ipa_pdev);
+void ipahal_destroy(void);
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem);
+
+#endif /* _IPAHAL_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
new file mode 100644
index 0000000..e355d9d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -0,0 +1,3200 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipc_logging.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include "ipahal.h"
+#include "ipahal_fltrt.h"
+#include "ipahal_fltrt_i.h"
+#include "ipahal_i.h"
+#include "../../ipa_common_i.h"
+
+/*
+ * struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version
+ * @support_hash: Are hashable tables supported
+ * @tbl_width: Width of table in bytes
+ * @sysaddr_alignment: System table address alignment
+ * @lcladdr_alignment: Local table offset alignment
+ * @blk_sz_alignment: Rules block size alignment
+ * @rule_start_alignment: Rule start address alignment
+ * @tbl_hdr_width: Width of the header structure in bytes
+ * @tbl_addr_mask: Masking for Table address
+ * @rule_max_prio: Max possible priority of a rule
+ * @rule_min_prio: Min possible priority of a rule
+ * @low_rule_id: Low value of Rule ID that can be used
+ * @rule_id_bit_len: Rule ID bit length
+ * @rule_buf_size: Max size rule may utilize.
+ * @write_val_to_hdr: Write address or offset to header entry
+ * @create_flt_bitmap: Create bitmap in H/W format using given bitmap
+ * @create_tbl_addr: Given a raw table address, create an H/W formatted one
+ * @parse_tbl_addr: Parse the given H/W address (hdr format)
+ * @rt_generate_hw_rule: Generate RT rule in H/W format
+ * @flt_generate_hw_rule: Generate FLT rule in H/W format
+ * @flt_generate_eq: Generate flt equation attributes from rule attributes
+ * @rt_parse_hw_rule: Parse rt rule read from H/W
+ * @flt_parse_hw_rule: Parse flt rule read from H/W
+ * @eq_bitfield: Array of the bit fields of the supported equations
+ */
+struct ipahal_fltrt_obj {
+ bool support_hash;
+ u32 tbl_width;
+ u32 sysaddr_alignment;
+ u32 lcladdr_alignment;
+ u32 blk_sz_alignment;
+ u32 rule_start_alignment;
+ u32 tbl_hdr_width;
+ u32 tbl_addr_mask;
+ int rule_max_prio;
+ int rule_min_prio;
+ u32 low_rule_id;
+ u32 rule_id_bit_len;
+ u32 rule_buf_size;
+ u8* (*write_val_to_hdr)(u64 val, u8 *hdr);
+ u64 (*create_flt_bitmap)(u64 ep_bitmap);
+ u64 (*create_tbl_addr)(bool is_sys, u64 addr);
+ void (*parse_tbl_addr)(u64 hwaddr, u64 *addr, bool *is_sys);
+ int (*rt_generate_hw_rule)(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+ int (*flt_generate_hw_rule)(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+ int (*flt_generate_eq)(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+ int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule);
+ int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule);
+ u8 eq_bitfield[IPA_EQ_MAX];
+};
+
+
+static u64 ipa_fltrt_create_flt_bitmap(u64 ep_bitmap)
+{
+	/* At IPA3, global configuration is possible but not used */
+ return (ep_bitmap << 1) & ~0x1;
+}
+
+static u64 ipa_fltrt_create_tbl_addr(bool is_sys, u64 addr)
+{
+ if (is_sys) {
+ if (addr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+ IPAHAL_ERR(
+ "sys addr is not aligned accordingly addr=0x%pad\n",
+ &addr);
+ ipa_assert();
+ return 0;
+ }
+ } else {
+ if (addr & IPA3_0_HW_TBL_LCLADDR_ALIGNMENT) {
+ IPAHAL_ERR("addr/ofst isn't lcl addr aligned %llu\n",
+ addr);
+ ipa_assert();
+ return 0;
+ }
+ /*
+		 * For local tables (at sram), offsets are used as table
+		 * addresses. The offset needs to be in 8B units
+		 * (local address aligned) and left shifted to its place.
+		 * The local bit needs to be enabled.
+ */
+ addr /= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+ addr *= IPA3_0_HW_TBL_ADDR_MASK + 1;
+ addr += 1;
+ }
+
+ return addr;
+}
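+
+/*
+ * Worked example for the local-address encoding above, using
+ * illustrative constant values only (assuming
+ * IPA3_0_HW_TBL_LCLADDR_ALIGNMENT == 0x7 and
+ * IPA3_0_HW_TBL_ADDR_MASK == 0x7F): a local offset of 0x40 becomes
+ * (0x40 / 0x8) * 0x80 + 1 = 0x401, i.e. the offset in 8B units shifted
+ * into the address field with the local bit set.
+ */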
+
+static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys)
+{
+ IPAHAL_DBG("Parsing hwaddr 0x%llx\n", hwaddr);
+
+ *is_sys = !(hwaddr & 0x1);
+	hwaddr &= ~0x1ULL; /* strip the local/system indication bit */
+ if (hwaddr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+ IPAHAL_ERR(
+ "sys addr is not aligned accordingly addr=0x%pad\n",
+ &hwaddr);
+ ipa_assert();
+ return;
+ }
+
+ if (!*is_sys) {
+ hwaddr /= IPA3_0_HW_TBL_ADDR_MASK + 1;
+ hwaddr *= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+ }
+
+ *addr = hwaddr;
+}
+
+/* Update these tables if the number of equations changes */
+static const int ipa3_0_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+ IPA_OFFSET_MEQ32_1};
+static const int ipa3_0_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+ IPA_OFFSET_MEQ128_1};
+static const int ipa3_0_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+ IPA_IHL_OFFSET_RANGE16_1};
+static const int ipa3_0_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+ IPA_IHL_OFFSET_MEQ32_1};
+
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule);
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+ const struct ipa_ipfltri_rule_eq *attrib, u8 **buf);
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_rt_parse_hw_rule(u8 *addr,
+ struct ipahal_rt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule(u8 *addr,
+ struct ipahal_flt_rule_entry *rule);
+
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+ (ARRAY_SIZE(__eq_array) <= (__eq_index))
+
+#define IPA_GET_RULE_EQ_BIT_PTRN(__eq) \
+ (BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)]))
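+
+/*
+ * Illustrative sketch, not part of this patch: the two macros above are
+ * typically used together when allocating an equation of a given kind:
+ *
+ *	if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32))
+ *		return -EPERM;
+ *	*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[ofst_meq32]);
+ *	ofst_meq32++;
+ */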
+
+/*
+ * ipa_fltrt_rule_generation_err_check() - check basic validity of the rule
+ * attribs before starting to build it.
+ * Checks that ipv4 attribs are not used in an ipv6 rule and vice-versa.
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ *
+ * Return: 0 on success, -EPERM on failure
+ */
+static int ipa_fltrt_rule_generation_err_check(
+ enum ipa_ip_type ipt, const struct ipa_rule_attrib *attrib)
+{
+ if (ipt == IPA_IP_v4) {
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+ attrib->attrib_mask & IPA_FLT_TC ||
+ attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ IPAHAL_ERR("v6 attrib's specified for v4 rule\n");
+ return -EPERM;
+ }
+ } else if (ipt == IPA_IP_v6) {
+ if (attrib->attrib_mask & IPA_FLT_TOS ||
+ attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ IPAHAL_ERR("v4 attrib's specified for v6 rule\n");
+ return -EPERM;
+ }
+ } else {
+ IPAHAL_ERR("unsupported ip %d\n", ipt);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf)
+{
+ struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+ u8 *start;
+ u16 en_rule = 0;
+
+ start = buf;
+ rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)buf;
+
+ ipa_assert_on(params->dst_pipe_idx & ~0x1F);
+ rule_hdr->u.hdr.pipe_dest_idx = params->dst_pipe_idx;
+ switch (params->hdr_type) {
+ case IPAHAL_RT_RULE_HDR_PROC_CTX:
+ rule_hdr->u.hdr.system = !params->hdr_lcl;
+ rule_hdr->u.hdr.proc_ctx = 1;
+ ipa_assert_on(params->hdr_ofst & 31);
+ rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5;
+ break;
+ case IPAHAL_RT_RULE_HDR_RAW:
+ rule_hdr->u.hdr.system = !params->hdr_lcl;
+ rule_hdr->u.hdr.proc_ctx = 0;
+ ipa_assert_on(params->hdr_ofst & 3);
+ rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2;
+ break;
+ case IPAHAL_RT_RULE_HDR_NONE:
+ rule_hdr->u.hdr.system = !params->hdr_lcl;
+ rule_hdr->u.hdr.proc_ctx = 0;
+ rule_hdr->u.hdr.hdr_offset = 0;
+ break;
+ default:
+ IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
+ WARN_ON(1);
+ return -EINVAL;
+	}
+
+ ipa_assert_on(params->priority & ~0x3FF);
+ rule_hdr->u.hdr.priority = params->priority;
+ rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+ ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+ ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+ rule_hdr->u.hdr.rule_id = params->id;
+
+ buf += sizeof(struct ipa3_0_rt_rule_hw_hdr);
+
+ if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, ¶ms->rule->attrib,
+ &buf, &en_rule)) {
+ IPAHAL_ERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+ rule_hdr->u.hdr.en_rule = en_rule;
+
+ IPAHAL_DBG("en_rule 0x%x\n", en_rule);
+ ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+ if (*hw_len == 0) {
+ *hw_len = buf - start;
+ } else if (*hw_len != (buf - start)) {
+ IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+ *hw_len, (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
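+
+/*
+ * Note on the hw_len parameter of the rule generators above and below
+ * (for reference): callers pass *hw_len == 0 to have the generated rule
+ * size returned, or a nonzero expected size to have it verified against
+ * the size actually generated.
+ */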
+
+static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf)
+{
+ struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+ u8 *start;
+ u16 en_rule = 0;
+
+ start = buf;
+ rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)buf;
+
+ switch (params->rule->action) {
+ case IPA_PASS_TO_ROUTING:
+ rule_hdr->u.hdr.action = 0x0;
+ break;
+ case IPA_PASS_TO_SRC_NAT:
+ rule_hdr->u.hdr.action = 0x1;
+ break;
+ case IPA_PASS_TO_DST_NAT:
+ rule_hdr->u.hdr.action = 0x2;
+ break;
+ case IPA_PASS_TO_EXCEPTION:
+ rule_hdr->u.hdr.action = 0x3;
+ break;
+ default:
+ IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+ rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+ rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+ rule_hdr->u.hdr.rsvd1 = 0;
+ rule_hdr->u.hdr.rsvd2 = 0;
+ rule_hdr->u.hdr.rsvd3 = 0;
+
+ ipa_assert_on(params->priority & ~0x3FF);
+ rule_hdr->u.hdr.priority = params->priority;
+ ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+ ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+ rule_hdr->u.hdr.rule_id = params->id;
+
+ buf += sizeof(struct ipa3_0_flt_rule_hw_hdr);
+
+ if (params->rule->eq_attrib_type) {
+ if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+ ¶ms->rule->eq_attrib, &buf)) {
+ IPAHAL_ERR("fail to generate hw rule from eq\n");
+ return -EPERM;
+ }
+ en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+ } else {
+ if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+ ¶ms->rule->attrib, &buf, &en_rule)) {
+ IPAHAL_ERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+ }
+ rule_hdr->u.hdr.en_rule = en_rule;
+
+ IPAHAL_DBG("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+ en_rule,
+ rule_hdr->u.hdr.action,
+ rule_hdr->u.hdr.rt_tbl_idx,
+ rule_hdr->u.hdr.retain_hdr);
+ IPAHAL_DBG("priority=%d, rule_id=%d\n",
+ rule_hdr->u.hdr.priority,
+ rule_hdr->u.hdr.rule_id);
+
+ ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+ if (*hw_len == 0) {
+ *hw_len = buf - start;
+ } else if (*hw_len != (buf - start)) {
+ IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+ *hw_len, (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/*
+ * This array contains the FLT/RT info for IPAv3 and later.
+ * All the information for IPAv3 is statically defined below.
+ * If information is missing for some IPA version,
+ * the init function will fill it in with the information from the previous
+ * IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
+ /* IPAv3 */
+ [IPA_HW_v3_0] = {
+ true,
+ IPA3_0_HW_TBL_WIDTH,
+ IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+ IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+ IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+ IPA3_0_HW_RULE_START_ALIGNMENT,
+ IPA3_0_HW_TBL_HDR_WIDTH,
+ IPA3_0_HW_TBL_ADDR_MASK,
+ IPA3_0_RULE_MAX_PRIORITY,
+ IPA3_0_RULE_MIN_PRIORITY,
+ IPA3_0_LOW_RULE_ID,
+ IPA3_0_RULE_ID_BIT_LEN,
+ IPA3_0_HW_RULE_BUF_SIZE,
+ ipa_write_64,
+ ipa_fltrt_create_flt_bitmap,
+ ipa_fltrt_create_tbl_addr,
+ ipa_fltrt_parse_tbl_addr,
+ ipa_rt_gen_hw_rule,
+ ipa_flt_gen_hw_rule,
+ ipa_flt_generate_eq,
+ ipa_rt_parse_hw_rule,
+ ipa_flt_parse_hw_rule,
+ {
+ [IPA_TOS_EQ] = 0,
+ [IPA_PROTOCOL_EQ] = 1,
+ [IPA_TC_EQ] = 2,
+ [IPA_OFFSET_MEQ128_0] = 3,
+ [IPA_OFFSET_MEQ128_1] = 4,
+ [IPA_OFFSET_MEQ32_0] = 5,
+ [IPA_OFFSET_MEQ32_1] = 6,
+ [IPA_IHL_OFFSET_MEQ32_0] = 7,
+ [IPA_IHL_OFFSET_MEQ32_1] = 8,
+ [IPA_METADATA_COMPARE] = 9,
+ [IPA_IHL_OFFSET_RANGE16_0] = 10,
+ [IPA_IHL_OFFSET_RANGE16_1] = 11,
+ [IPA_IHL_OFFSET_EQ_32] = 12,
+ [IPA_IHL_OFFSET_EQ_16] = 13,
+ [IPA_FL_EQ] = 14,
+ [IPA_IS_FRAG] = 15,
+ },
+ },
+};
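+
+/*
+ * Minimal sketch of the fill-forward initialization described above the
+ * table (illustrative only, not the actual init code): an entry whose
+ * fields are all zero inherits the previous version's info.
+ *
+ *	for (i = IPA_HW_v3_0 + 1; i < IPA_HW_MAX; i++) {
+ *		if (!memchr_inv(&ipahal_fltrt_objs[i], 0,
+ *			sizeof(ipahal_fltrt_objs[i])))
+ *			ipahal_fltrt_objs[i] = ipahal_fltrt_objs[i - 1];
+ *	}
+ */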
+
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+ if (ipa_fltrt_rule_generation_err_check(ipt, attrib))
+ return -EPERM;
+
+ if (ipt == IPA_IP_v4) {
+ if (ipa_flt_generate_eq_ip4(ipt, attrib, eq_atrb)) {
+ IPAHAL_ERR("failed to build ipv4 flt eq rule\n");
+ return -EPERM;
+ }
+ } else if (ipt == IPA_IP_v6) {
+ if (ipa_flt_generate_eq_ip6(ipt, attrib, eq_atrb)) {
+ IPAHAL_ERR("failed to build ipv6 flt eq rule\n");
+ return -EPERM;
+ }
+ } else {
+ IPAHAL_ERR("unsupported ip %d\n", ipt);
+ return -EPERM;
+ }
+
+ /*
+ * default "rule" means no attributes set -> map to
+ * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+ */
+ if (attrib->attrib_mask == 0) {
+ eq_atrb->rule_eq_bitmap = 0;
+ eq_atrb->rule_eq_bitmap |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_OFFSET_MEQ32_0);
+ eq_atrb->offset_meq_32[0].offset = 0;
+ eq_atrb->offset_meq_32[0].mask = 0;
+ eq_atrb->offset_meq_32[0].value = 0;
+ }
+
+ return 0;
+}
+
+static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
+ u8 hdr_mac_addr_offset,
+ const uint8_t mac_addr_mask[ETH_ALEN],
+ const uint8_t mac_addr[ETH_ALEN])
+{
+ int i;
+
+ *extra = ipa_write_8(hdr_mac_addr_offset, *extra);
+
+ /* LSB MASK and ADDR */
+ *rest = ipa_write_64(0, *rest);
+ *rest = ipa_write_64(0, *rest);
+
+ /* MSB MASK and ADDR */
+ *rest = ipa_write_16(0, *rest);
+ for (i = 5; i >= 0; i--)
+ *rest = ipa_write_8(mac_addr_mask[i], *rest);
+ *rest = ipa_write_16(0, *rest);
+ for (i = 5; i >= 0; i--)
+ *rest = ipa_write_8(mac_addr[i], *rest);
+}
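+
+/*
+ * Resulting byte layout (for reference): the helper above emits the
+ * offset into the extra words, then 16 zero bytes (mask and addr LSBs),
+ * then the mask MSB word (2 zero bytes followed by the 6 mask bytes in
+ * reversed order) and the addr MSB word in the same form, so the MAC
+ * occupies bytes 10..15 of each 16B mask/value word.
+ */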
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
+ const struct ipa_rule_attrib *attrib,
+ u8 **extra_wrds, u8 **rest_wrds)
+{
+ u8 *extra = *extra_wrds;
+ u8 *rest = *rest_wrds;
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+ int rc = 0;
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+ extra = ipa_write_8(attrib->u.v4.tos, extra);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+ extra = ipa_write_8(attrib->u.v4.protocol, extra);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -14,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -8,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -22,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -16,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* 0 => offset of TOS in v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32((attrib->tos_mask << 16), rest);
+ rest = ipa_write_32((attrib->tos_value << 16), rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* 12 => offset of src ip in v4 header */
+ extra = ipa_write_8(12, extra);
+ rest = ipa_write_32(attrib->u.v4.src_addr_mask, rest);
+ rest = ipa_write_32(attrib->u.v4.src_addr, rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* 16 => offset of dst ip in v4 header */
+ extra = ipa_write_8(16, extra);
+ rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest);
+ rest = ipa_write_32(attrib->u.v4.dst_addr, rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* -2 => offset of ether type in L2 hdr */
+ extra = ipa_write_8((u8)-2, extra);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_16(htons(attrib->ether_type), rest);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_16(htons(attrib->ether_type), rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 0 => offset of type after v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32(0xFF, rest);
+ rest = ipa_write_32(attrib->type, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 1 => offset of code after v4 header */
+ extra = ipa_write_8(1, extra);
+ rest = ipa_write_32(0xFF, rest);
+ rest = ipa_write_32(attrib->code, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 0 => offset of SPI after v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32(0xFFFFFFFF, rest);
+ rest = ipa_write_32(attrib->spi, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+ rest = ipa_write_32(attrib->meta_data_mask, rest);
+ rest = ipa_write_32(attrib->meta_data, rest);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAHAL_ERR("bad src port range param\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 0 => offset of src port after v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_16(attrib->src_port_hi, rest);
+ rest = ipa_write_16(attrib->src_port_lo, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAHAL_ERR("bad dst port range param\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 2 => offset of dst port after v4 header */
+ extra = ipa_write_8(2, extra);
+ rest = ipa_write_16(attrib->dst_port_hi, rest);
+ rest = ipa_write_16(attrib->dst_port_lo, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 0 => offset of src port after v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_16(attrib->src_port, rest);
+ rest = ipa_write_16(attrib->src_port, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 2 => offset of dst port after v4 header */
+ extra = ipa_write_8(2, extra);
+ rest = ipa_write_16(attrib->dst_port, rest);
+ rest = ipa_write_16(attrib->dst_port, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+ goto done;
+
+err:
+ rc = -EPERM;
+done:
+ *extra_wrds = extra;
+ *rest_wrds = rest;
+ return rc;
+}
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
+ const struct ipa_rule_attrib *attrib,
+ u8 **extra_wrds, u8 **rest_wrds)
+{
+ u8 *extra = *extra_wrds;
+ u8 *rest = *rest_wrds;
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+ int rc = 0;
+
+ /* v6 code below assumes no extension headers TODO: fix this */
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+ extra = ipa_write_8(attrib->u.v6.next_hdr, extra);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ);
+ extra = ipa_write_8(attrib->u.v6.tc, extra);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ /* 8 => offset of src ip in v6 header */
+ extra = ipa_write_8(8, extra);
+ rest = ipa_write_32(attrib->u.v6.src_addr_mask[3], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr_mask[2], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr[3], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr[2], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr_mask[1], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr_mask[0], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr[1], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr[0], rest);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ /* 24 => offset of dst ip in v6 header */
+ extra = ipa_write_8(24, extra);
+ rest = ipa_write_32(attrib->u.v6.dst_addr_mask[3], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr_mask[2], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr[3], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr[2], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr_mask[1], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr_mask[0], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr[1], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr[0], rest);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ /* 0 => offset of TOS in v6 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_64(0, rest);
+ rest = ipa_write_64(0, rest);
+ rest = ipa_write_32(0, rest);
+ rest = ipa_write_32((attrib->tos_mask << 20), rest);
+ rest = ipa_write_32(0, rest);
+ rest = ipa_write_32((attrib->tos_value << 20), rest);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -14,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -8,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -22,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -16,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* -2 => offset of ether type in L2 hdr */
+ extra = ipa_write_8((u8)-2, extra);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_16(htons(attrib->ether_type), rest);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_16(htons(attrib->ether_type), rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 0 => offset of type after v6 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32(0xFF, rest);
+ rest = ipa_write_32(attrib->type, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 1 => offset of code after v6 header */
+ extra = ipa_write_8(1, extra);
+ rest = ipa_write_32(0xFF, rest);
+ rest = ipa_write_32(attrib->code, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 0 => offset of SPI after v6 header FIXME */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32(0xFFFFFFFF, rest);
+ rest = ipa_write_32(attrib->spi, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+ rest = ipa_write_32(attrib->meta_data_mask, rest);
+ rest = ipa_write_32(attrib->meta_data, rest);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 0 => offset of src port after v6 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_16(attrib->src_port, rest);
+ rest = ipa_write_16(attrib->src_port, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 2 => offset of dst port after v6 header */
+ extra = ipa_write_8(2, extra);
+ rest = ipa_write_16(attrib->dst_port, rest);
+ rest = ipa_write_16(attrib->dst_port, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAHAL_ERR("bad src port range param\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 0 => offset of src port after v6 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_16(attrib->src_port_hi, rest);
+ rest = ipa_write_16(attrib->src_port_lo, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAHAL_ERR("bad dst port range param\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 2 => offset of dst port after v6 header */
+ extra = ipa_write_8(2, extra);
+ rest = ipa_write_16(attrib->dst_port_hi, rest);
+ rest = ipa_write_16(attrib->dst_port_lo, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+ rest = ipa_write_32(attrib->u.v6.flow_label & 0xFFFFF,
+ rest);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+ goto done;
+
+err:
+ rc = -EPERM;
+done:
+ *extra_wrds = extra;
+ *rest_wrds = rest;
+ return rc;
+}
+
+static u8 *ipa_fltrt_copy_mem(u8 *src, u8 *dst, int cnt)
+{
+ while (cnt--)
+ *dst++ = *src++;
+
+ return dst;
+}
+
+/*
+ * ipa_fltrt_generate_hw_rule_bdy() - generate HW rule body (w/o header)
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer. Advance it after building the rule
+ * @en_rule: enable rule
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+ int sz;
+ int rc = 0;
+ u8 *extra_wrd_buf;
+ u8 *rest_wrd_buf;
+ u8 *extra_wrd_start;
+ u8 *rest_wrd_start;
+ u8 *extra_wrd_i;
+ u8 *rest_wrd_i;
+
+ sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT;
+ extra_wrd_buf = kzalloc(sz, GFP_KERNEL);
+ if (!extra_wrd_buf) {
+ IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+ rc = -ENOMEM;
+ goto fail_extra_alloc;
+ }
+
+ sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT;
+ rest_wrd_buf = kzalloc(sz, GFP_KERNEL);
+ if (!rest_wrd_buf) {
+ IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+ rc = -ENOMEM;
+ goto fail_rest_alloc;
+ }
+
+ extra_wrd_start = extra_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+ extra_wrd_start = (u8 *)((long)extra_wrd_start &
+ ~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+ rest_wrd_start = rest_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+ rest_wrd_start = (u8 *)((long)rest_wrd_start &
+ ~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+ extra_wrd_i = extra_wrd_start;
+ rest_wrd_i = rest_wrd_start;
+
+ rc = ipa_fltrt_rule_generation_err_check(ipt, attrib);
+ if (rc) {
+ IPAHAL_ERR("rule generation err check failed\n");
+ goto fail_err_check;
+ }
+
+ if (ipt == IPA_IP_v4) {
+ if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib,
+ &extra_wrd_i, &rest_wrd_i)) {
+ IPAHAL_ERR("failed to build ipv4 hw rule\n");
+ rc = -EPERM;
+ goto fail_err_check;
+ }
+
+ } else if (ipt == IPA_IP_v6) {
+ if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib,
+ &extra_wrd_i, &rest_wrd_i)) {
+ IPAHAL_ERR("failed to build ipv6 hw rule\n");
+ rc = -EPERM;
+ goto fail_err_check;
+ }
+ } else {
+ IPAHAL_ERR("unsupported ip %d\n", ipt);
+ goto fail_err_check;
+ }
+
+ /*
+ * default "rule" means no attributes set -> map to
+ * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+ */
+ if (attrib->attrib_mask == 0) {
+ IPAHAL_DBG("building default rule\n");
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]);
+ extra_wrd_i = ipa_write_8(0, extra_wrd_i); /* offset */
+ rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* mask */
+ rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* val */
+ }
+
+ IPAHAL_DBG("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
+ IPAHAL_DBG("extra_word_2 0x%llx\n",
+ *(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH));
+
+ extra_wrd_i = ipa_pad_to_64(extra_wrd_i);
+ sz = extra_wrd_i - extra_wrd_start;
+ IPAHAL_DBG("extra words params sz %d\n", sz);
+ *buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz);
+
+ rest_wrd_i = ipa_pad_to_64(rest_wrd_i);
+ sz = rest_wrd_i - rest_wrd_start;
+ IPAHAL_DBG("non extra words params sz %d\n", sz);
+ *buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz);
+
+fail_err_check:
+ kfree(rest_wrd_buf);
+fail_rest_alloc:
+ kfree(extra_wrd_buf);
+fail_extra_alloc:
+ return rc;
+}
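+
+/*
+ * Resulting rule body layout (for reference): the equation "extra" words
+ * are emitted first, padded to 64 bits, followed by the remaining
+ * ("rest") equation words, also padded to 64 bits, so the body always
+ * occupies a whole number of 8B words after the rule header.
+ */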
+
+
+/**
+ * ipa_fltrt_calc_extra_wrd_bytes() - Calculate the number of extra word
+ * bytes needed for the given equation attributes
+ * @attrib: equation attribute
+ *
+ * Return value: the number of extra bytes needed
+ */
+static int ipa_fltrt_calc_extra_wrd_bytes(
+ const struct ipa_ipfltri_rule_eq *attrib)
+{
+ int num = 0;
+
+ if (attrib->tos_eq_present)
+ num++;
+ if (attrib->protocol_eq_present)
+ num++;
+ if (attrib->tc_eq_present)
+ num++;
+ num += attrib->num_offset_meq_128;
+ num += attrib->num_offset_meq_32;
+ num += attrib->num_ihl_offset_meq_32;
+ num += attrib->num_ihl_offset_range_16;
+ if (attrib->ihl_offset_eq_32_present)
+ num++;
+ if (attrib->ihl_offset_eq_16_present)
+ num++;
+
+ IPAHAL_DBG("extra bytes number %d\n", num);
+
+ return num;
+}
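+
+/*
+ * Worked example: a rule with tos_eq_present set, two offset_meq_32
+ * equations and one ihl_offset_range_16 equation needs
+ * 1 + 2 + 1 = 4 extra bytes.
+ */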
+
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+ const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
+{
+ int num_offset_meq_32 = attrib->num_offset_meq_32;
+ int num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
+ int num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
+ int num_offset_meq_128 = attrib->num_offset_meq_128;
+ int i;
+ int extra_bytes;
+ u8 *extra;
+ u8 *rest;
+
+ extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(attrib);
+	/* only 3 eqs do not have an extra word param; 13 out of 16 is the
+	 * number of equations that need an extra word param
+	 */
+ if (extra_bytes > 13) {
+ IPAHAL_ERR("too much extra bytes\n");
+ return -EPERM;
+ } else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+ /* two extra words */
+ extra = *buf;
+ rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+ } else if (extra_bytes > 0) {
+		/* single extra word */
+ extra = *buf;
+ rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH;
+ } else {
+ /* no extra words */
+ extra = NULL;
+ rest = *buf;
+ }
+
+ if (attrib->tos_eq_present)
+ extra = ipa_write_8(attrib->tos_eq, extra);
+
+ if (attrib->protocol_eq_present)
+ extra = ipa_write_8(attrib->protocol_eq, extra);
+
+ if (attrib->tc_eq_present)
+ extra = ipa_write_8(attrib->tc_eq, extra);
+
+ if (num_offset_meq_128) {
+ extra = ipa_write_8(attrib->offset_meq_128[0].offset, extra);
+ for (i = 0; i < 8; i++)
+ rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+ rest);
+ for (i = 0; i < 8; i++)
+ rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+ rest);
+ for (i = 8; i < 16; i++)
+ rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+ rest);
+ for (i = 8; i < 16; i++)
+ rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+ rest);
+ num_offset_meq_128--;
+ }
+
+ if (num_offset_meq_128) {
+ extra = ipa_write_8(attrib->offset_meq_128[1].offset, extra);
+ for (i = 0; i < 8; i++)
+ rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+ rest);
+ for (i = 0; i < 8; i++)
+ rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+ rest);
+ for (i = 8; i < 16; i++)
+ rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+ rest);
+ for (i = 8; i < 16; i++)
+ rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+ rest);
+ num_offset_meq_128--;
+ }
+
+ if (num_offset_meq_32) {
+ extra = ipa_write_8(attrib->offset_meq_32[0].offset, extra);
+ rest = ipa_write_32(attrib->offset_meq_32[0].mask, rest);
+ rest = ipa_write_32(attrib->offset_meq_32[0].value, rest);
+ num_offset_meq_32--;
+ }
+
+ if (num_offset_meq_32) {
+ extra = ipa_write_8(attrib->offset_meq_32[1].offset, extra);
+ rest = ipa_write_32(attrib->offset_meq_32[1].mask, rest);
+ rest = ipa_write_32(attrib->offset_meq_32[1].value, rest);
+ num_offset_meq_32--;
+ }
+
+ if (num_ihl_offset_meq_32) {
+ extra = ipa_write_8(attrib->ihl_offset_meq_32[0].offset,
+ extra);
+
+ rest = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, rest);
+ rest = ipa_write_32(attrib->ihl_offset_meq_32[0].value, rest);
+ num_ihl_offset_meq_32--;
+ }
+
+ if (num_ihl_offset_meq_32) {
+ extra = ipa_write_8(attrib->ihl_offset_meq_32[1].offset,
+ extra);
+
+ rest = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, rest);
+ rest = ipa_write_32(attrib->ihl_offset_meq_32[1].value, rest);
+ num_ihl_offset_meq_32--;
+ }
+
+ if (attrib->metadata_meq32_present) {
+ rest = ipa_write_32(attrib->metadata_meq32.mask, rest);
+ rest = ipa_write_32(attrib->metadata_meq32.value, rest);
+ }
+
+ if (num_ihl_offset_range_16) {
+ extra = ipa_write_8(attrib->ihl_offset_range_16[0].offset,
+ extra);
+
+ rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_high,
+ rest);
+ rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_low,
+ rest);
+ num_ihl_offset_range_16--;
+ }
+
+ if (num_ihl_offset_range_16) {
+ extra = ipa_write_8(attrib->ihl_offset_range_16[1].offset,
+ extra);
+
+ rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_high,
+ rest);
+ rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_low,
+ rest);
+ num_ihl_offset_range_16--;
+ }
+
+ if (attrib->ihl_offset_eq_32_present) {
+ extra = ipa_write_8(attrib->ihl_offset_eq_32.offset, extra);
+ rest = ipa_write_32(attrib->ihl_offset_eq_32.value, rest);
+ }
+
+ if (attrib->ihl_offset_eq_16_present) {
+ extra = ipa_write_8(attrib->ihl_offset_eq_16.offset, extra);
+ rest = ipa_write_16(attrib->ihl_offset_eq_16.value, rest);
+ rest = ipa_write_16(0, rest);
+ }
+
+ if (attrib->fl_eq_present)
+ rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest);
+
+ extra = ipa_pad_to_64(extra);
+ rest = ipa_pad_to_64(rest);
+ *buf = rest;
+
+ return 0;
+}
+
+static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
+ u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN],
+ const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
+{
+ int i;
+
+ eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
+
+ /* LSB MASK and ADDR */
+ memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 8);
+ memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 8);
+
+ /* MSB MASK and ADDR */
+ memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 2);
+ for (i = 0; i <= 5; i++)
+ eq_atrb->offset_meq_128[ofst_meq128].mask[15 - i] =
+ mac_addr_mask[i];
+
+ memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 2);
+ for (i = 0; i <= 5; i++)
+ eq_atrb->offset_meq_128[ofst_meq128].value[15 - i] =
+ mac_addr[i];
+}
+
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+ u16 eq_bitmap = 0;
+ u16 *en_rule = &eq_bitmap;
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+ eq_atrb->tos_eq_present = 1;
+ eq_atrb->tos_eq = attrib->u.v4.tos;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+ eq_atrb->protocol_eq_present = 1;
+ eq_atrb->protocol_eq = attrib->u.v4.protocol;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ attrib->tos_mask << 16;
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ attrib->tos_value << 16;
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ attrib->u.v4.src_addr_mask;
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ attrib->u.v4.src_addr;
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ attrib->u.v4.dst_addr_mask;
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ attrib->u.v4.dst_addr;
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ htons(attrib->ether_type);
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ htons(attrib->ether_type);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->type;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->code;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ 0xFFFFFFFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->spi;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_METADATA_COMPARE);
+ eq_atrb->metadata_meq32_present = 1;
+ eq_atrb->metadata_meq32.offset = 0;
+ eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+ eq_atrb->metadata_meq32.value = attrib->meta_data;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAHAL_ERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAHAL_ERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+ eq_atrb->ipv4_frag_eq_present = 1;
+ }
+
+ eq_atrb->rule_eq_bitmap = *en_rule;
+ eq_atrb->num_offset_meq_32 = ofst_meq32;
+ eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+ eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+ eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+ return 0;
+}
+
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+ u16 eq_bitmap = 0;
+ u16 *en_rule = &eq_bitmap;
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_PROTOCOL_EQ);
+ eq_atrb->protocol_eq_present = 1;
+ eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_TC_EQ);
+ eq_atrb->tc_eq_present = 1;
+ eq_atrb->tc_eq = attrib->u.v6.tc;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ /* use the same word order as in ipa v2 */
+ eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+ = attrib->u.v6.src_addr_mask[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+ = attrib->u.v6.src_addr_mask[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+ = attrib->u.v6.src_addr_mask[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+ = attrib->u.v6.src_addr_mask[3];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+ = attrib->u.v6.src_addr[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+ = attrib->u.v6.src_addr[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+ = attrib->u.v6.src_addr[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+ 12) = attrib->u.v6.src_addr[3];
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
+ /* use the same word order as in ipa v2 */
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+ = attrib->u.v6.dst_addr_mask[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+ = attrib->u.v6.dst_addr_mask[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+ = attrib->u.v6.dst_addr_mask[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+ = attrib->u.v6.dst_addr_mask[3];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+ = attrib->u.v6.dst_addr[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+ = attrib->u.v6.dst_addr[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+ = attrib->u.v6.dst_addr[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+ 12) = attrib->u.v6.dst_addr[3];
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
+ memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 12);
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+ = attrib->tos_mask << 20;
+ memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 12);
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+ 12) = attrib->tos_value << 20;
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ htons(attrib->ether_type);
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ htons(attrib->ether_type);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->type;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->code;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ 0xFFFFFFFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->spi;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_METADATA_COMPARE);
+ eq_atrb->metadata_meq32_present = 1;
+ eq_atrb->metadata_meq32.offset = 0;
+ eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+ eq_atrb->metadata_meq32.value = attrib->meta_data;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAHAL_ERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAHAL_ERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+ eq_atrb->fl_eq_present = 1;
+ eq_atrb->fl_eq = attrib->u.v6.flow_label;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_IS_FRAG);
+ eq_atrb->ipv4_frag_eq_present = 1;
+ }
+
+ eq_atrb->rule_eq_bitmap = *en_rule;
+ eq_atrb->num_offset_meq_32 = ofst_meq32;
+ eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+ eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+ eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+ return 0;
+}
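+
+/*
+ * Illustrative usage of the equation generation above (a sketch, not
+ * driver code; variable names here are hypothetical): building the
+ * equation form for an IPv6 rule that matches a source port range.
+ *
+ *	struct ipa_rule_attrib attrib = { 0 };
+ *	struct ipa_ipfltri_rule_eq eq = { 0 };
+ *
+ *	attrib.attrib_mask = IPA_FLT_SRC_PORT_RANGE;
+ *	attrib.src_port_lo = 1000;
+ *	attrib.src_port_hi = 2000;
+ *	ipa_flt_generate_eq_ip6(IPA_IP_v6, &attrib, &eq);
+ *
+ * On success, eq.num_ihl_offset_range_16 is 1 and ports 1000..2000 are
+ * matched as a 16-bit range at ihl offset 0 (the L4 source port).
+ */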
+
+static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz,
+ struct ipa_ipfltri_rule_eq *atrb, u32 *rule_size)
+{
+ u16 eq_bitmap;
+ int extra_bytes;
+ u8 *extra;
+ u8 *rest;
+ int i;
+ u8 dummy_extra_wrd;
+
+ if (!addr || !atrb || !rule_size) {
+ IPAHAL_ERR("Input error: addr=%p atrb=%p rule_size=%p\n",
+ addr, atrb, rule_size);
+ return -EINVAL;
+ }
+
+ eq_bitmap = atrb->rule_eq_bitmap;
+
+ IPAHAL_DBG("eq_bitmap=0x%x\n", eq_bitmap);
+
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ))
+ atrb->tos_eq_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ))
+ atrb->protocol_eq_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ))
+ atrb->tc_eq_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_0))
+ atrb->num_offset_meq_128++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_1))
+ atrb->num_offset_meq_128++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_0))
+ atrb->num_offset_meq_32++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_1))
+ atrb->num_offset_meq_32++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_0))
+ atrb->num_ihl_offset_meq_32++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_1))
+ atrb->num_ihl_offset_meq_32++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE))
+ atrb->metadata_meq32_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_0))
+ atrb->num_ihl_offset_range_16++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_1))
+ atrb->num_ihl_offset_range_16++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_32))
+ atrb->ihl_offset_eq_32_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_16))
+ atrb->ihl_offset_eq_16_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ))
+ atrb->fl_eq_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG))
+ atrb->ipv4_frag_eq_present = true;
+
+ extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(atrb);
+	/* Only 3 of the 16 equations have no extra-word parameter, so at
+	 * most 13 extra bytes (one per remaining equation) are expected
+	 */
+ if (extra_bytes > 13) {
+		IPAHAL_ERR("too many extra bytes\n");
+ return -EPERM;
+ } else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+ /* two extra words */
+ extra = addr + hdr_sz;
+ rest = extra + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+ } else if (extra_bytes > 0) {
+ /* single extra word */
+ extra = addr + hdr_sz;
+ rest = extra + IPA3_0_HW_TBL_HDR_WIDTH;
+ } else {
+ /* no extra words */
+ dummy_extra_wrd = 0;
+ extra = &dummy_extra_wrd;
+ rest = addr + hdr_sz;
+ }
+ IPAHAL_DBG("addr=0x%p extra=0x%p rest=0x%p\n", addr, extra, rest);
+
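+	/*
+	 * Illustrative layout (a sketch matching the parsing order below):
+	 * a rule whose bitmap enables only tos_eq and one offset_meq_32 has
+	 * two extra bytes, hence a single extra word after the rule header:
+	 *
+	 *	addr:  rule header (hdr_sz bytes)
+	 *	extra: [tos value][meq32 offset][padding] - one hdr-width word
+	 *	rest:  [meq32 mask (4B)][meq32 value (4B)] ...
+	 */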
+ if (atrb->tos_eq_present)
+ atrb->tos_eq = *extra++;
+ if (atrb->protocol_eq_present)
+ atrb->protocol_eq = *extra++;
+ if (atrb->tc_eq_present)
+ atrb->tc_eq = *extra++;
+
+ if (atrb->num_offset_meq_128 > 0) {
+ atrb->offset_meq_128[0].offset = *extra++;
+ for (i = 0; i < 8; i++)
+ atrb->offset_meq_128[0].mask[i] = *rest++;
+ for (i = 0; i < 8; i++)
+ atrb->offset_meq_128[0].value[i] = *rest++;
+ for (i = 8; i < 16; i++)
+ atrb->offset_meq_128[0].mask[i] = *rest++;
+ for (i = 8; i < 16; i++)
+ atrb->offset_meq_128[0].value[i] = *rest++;
+ }
+ if (atrb->num_offset_meq_128 > 1) {
+ atrb->offset_meq_128[1].offset = *extra++;
+ for (i = 0; i < 8; i++)
+ atrb->offset_meq_128[1].mask[i] = *rest++;
+ for (i = 0; i < 8; i++)
+ atrb->offset_meq_128[1].value[i] = *rest++;
+ for (i = 8; i < 16; i++)
+ atrb->offset_meq_128[1].mask[i] = *rest++;
+ for (i = 8; i < 16; i++)
+ atrb->offset_meq_128[1].value[i] = *rest++;
+ }
+
+ if (atrb->num_offset_meq_32 > 0) {
+ atrb->offset_meq_32[0].offset = *extra++;
+ atrb->offset_meq_32[0].mask = *((u32 *)rest);
+ rest += 4;
+ atrb->offset_meq_32[0].value = *((u32 *)rest);
+ rest += 4;
+ }
+ if (atrb->num_offset_meq_32 > 1) {
+ atrb->offset_meq_32[1].offset = *extra++;
+ atrb->offset_meq_32[1].mask = *((u32 *)rest);
+ rest += 4;
+ atrb->offset_meq_32[1].value = *((u32 *)rest);
+ rest += 4;
+ }
+
+ if (atrb->num_ihl_offset_meq_32 > 0) {
+ atrb->ihl_offset_meq_32[0].offset = *extra++;
+ atrb->ihl_offset_meq_32[0].mask = *((u32 *)rest);
+ rest += 4;
+ atrb->ihl_offset_meq_32[0].value = *((u32 *)rest);
+ rest += 4;
+ }
+ if (atrb->num_ihl_offset_meq_32 > 1) {
+ atrb->ihl_offset_meq_32[1].offset = *extra++;
+ atrb->ihl_offset_meq_32[1].mask = *((u32 *)rest);
+ rest += 4;
+ atrb->ihl_offset_meq_32[1].value = *((u32 *)rest);
+ rest += 4;
+ }
+
+ if (atrb->metadata_meq32_present) {
+ atrb->metadata_meq32.mask = *((u32 *)rest);
+ rest += 4;
+ atrb->metadata_meq32.value = *((u32 *)rest);
+ rest += 4;
+ }
+
+ if (atrb->num_ihl_offset_range_16 > 0) {
+ atrb->ihl_offset_range_16[0].offset = *extra++;
+ atrb->ihl_offset_range_16[0].range_high = *((u16 *)rest);
+ rest += 2;
+ atrb->ihl_offset_range_16[0].range_low = *((u16 *)rest);
+ rest += 2;
+ }
+ if (atrb->num_ihl_offset_range_16 > 1) {
+ atrb->ihl_offset_range_16[1].offset = *extra++;
+ atrb->ihl_offset_range_16[1].range_high = *((u16 *)rest);
+ rest += 2;
+ atrb->ihl_offset_range_16[1].range_low = *((u16 *)rest);
+ rest += 2;
+ }
+
+ if (atrb->ihl_offset_eq_32_present) {
+ atrb->ihl_offset_eq_32.offset = *extra++;
+ atrb->ihl_offset_eq_32.value = *((u32 *)rest);
+ rest += 4;
+ }
+
+ if (atrb->ihl_offset_eq_16_present) {
+ atrb->ihl_offset_eq_16.offset = *extra++;
+ atrb->ihl_offset_eq_16.value = *((u16 *)rest);
+ rest += 4;
+ }
+
+ if (atrb->fl_eq_present) {
+ atrb->fl_eq = *((u32 *)rest);
+ atrb->fl_eq &= 0xfffff;
+ rest += 4;
+ }
+
+ IPAHAL_DBG("before rule alignment rest=0x%p\n", rest);
+ rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) &
+ ~IPA3_0_HW_RULE_START_ALIGNMENT);
+ IPAHAL_DBG("after rule alignment rest=0x%p\n", rest);
+
+ *rule_size = rest - addr;
+ IPAHAL_DBG("rule_size=0x%x\n", *rule_size);
+
+ return 0;
+}
+
+static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule)
+{
+ struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+ struct ipa_ipfltri_rule_eq *atrb;
+
+ IPAHAL_DBG("Entry\n");
+
+ rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr;
+ atrb = &rule->eq_attrib;
+
+ IPAHAL_DBG("read hdr 0x%llx\n", rule_hdr->u.word);
+
+ if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+ rule->rule_size = 0;
+ return 0;
+ }
+
+ rule->dst_pipe_idx = rule_hdr->u.hdr.pipe_dest_idx;
+ if (rule_hdr->u.hdr.proc_ctx) {
+ rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+ rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 5;
+ } else {
+ rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+ rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 2;
+ }
+ rule->hdr_lcl = !rule_hdr->u.hdr.system;
+
+ rule->priority = rule_hdr->u.hdr.priority;
+ rule->retain_hdr = rule_hdr->u.hdr.retain_hdr;
+ rule->id = rule_hdr->u.hdr.rule_id;
+
+ atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+ return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+ atrb, &rule->rule_size);
+}
+
+static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule)
+{
+ struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+ struct ipa_ipfltri_rule_eq *atrb;
+
+ IPAHAL_DBG("Entry\n");
+
+ rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr;
+ atrb = &rule->rule.eq_attrib;
+
+ if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+ rule->rule_size = 0;
+ return 0;
+ }
+
+ switch (rule_hdr->u.hdr.action) {
+ case 0x0:
+ rule->rule.action = IPA_PASS_TO_ROUTING;
+ break;
+ case 0x1:
+ rule->rule.action = IPA_PASS_TO_SRC_NAT;
+ break;
+ case 0x2:
+ rule->rule.action = IPA_PASS_TO_DST_NAT;
+ break;
+ case 0x3:
+ rule->rule.action = IPA_PASS_TO_EXCEPTION;
+ break;
+ default:
+ IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
+ WARN_ON(1);
+ rule->rule.action = rule_hdr->u.hdr.action;
+ }
+
+ rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx;
+ rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr;
+ rule->priority = rule_hdr->u.hdr.priority;
+ rule->id = rule_hdr->u.hdr.rule_id;
+
+ atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+ rule->rule.eq_attrib_type = 1;
+ return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+ atrb, &rule->rule_size);
+}
+
+/*
+ * ipahal_fltrt_init() - Build the FLT/RT information table
+ * See ipahal_fltrt_objs[] comments
+ *
+ * Note: Since global variables are zero-initialized, any info-table entry
+ * that was not explicitly overridden remains zero and is recognized as such.
+ */
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type)
+{
+ struct ipahal_fltrt_obj zero_obj;
+ int i;
+ struct ipa_mem_buffer *mem;
+ int rc = -EFAULT;
+
+ IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ if (ipa_hw_type >= IPA_HW_MAX) {
+ IPAHAL_ERR("Invalid H/W type\n");
+ return -EFAULT;
+ }
+
+ memset(&zero_obj, 0, sizeof(zero_obj));
+ for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+ if (!memcmp(&ipahal_fltrt_objs[i+1], &zero_obj,
+ sizeof(struct ipahal_fltrt_obj))) {
+ memcpy(&ipahal_fltrt_objs[i+1],
+ &ipahal_fltrt_objs[i],
+ sizeof(struct ipahal_fltrt_obj));
+ } else {
+ /*
+ * explicitly overridden FLT RT info
+ * Check validity
+ */
+ if (!ipahal_fltrt_objs[i+1].tbl_width) {
+ IPAHAL_ERR(
+ "Zero tbl width ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].sysaddr_alignment) {
+ IPAHAL_ERR(
+ "No tbl sysaddr alignment ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].lcladdr_alignment) {
+ IPAHAL_ERR(
+ "No tbl lcladdr alignment ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].blk_sz_alignment) {
+ IPAHAL_ERR(
+ "No blk sz alignment ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].rule_start_alignment) {
+ IPAHAL_ERR(
+ "No rule start alignment ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].tbl_hdr_width) {
+ IPAHAL_ERR(
+ "Zero tbl hdr width ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].tbl_addr_mask) {
+ IPAHAL_ERR(
+				"Zero tbl addr mask ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (ipahal_fltrt_objs[i+1].rule_id_bit_len < 2) {
+ IPAHAL_ERR(
+				"Too few bits for rule_id ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].rule_buf_size) {
+ IPAHAL_ERR(
+ "zero rule buf size ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].write_val_to_hdr) {
+ IPAHAL_ERR(
+ "No write_val_to_hdr CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].create_flt_bitmap) {
+ IPAHAL_ERR(
+ "No create_flt_bitmap CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].create_tbl_addr) {
+ IPAHAL_ERR(
+ "No create_tbl_addr CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].parse_tbl_addr) {
+ IPAHAL_ERR(
+ "No parse_tbl_addr CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].rt_generate_hw_rule) {
+ IPAHAL_ERR(
+ "No rt_generate_hw_rule CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].flt_generate_hw_rule) {
+ IPAHAL_ERR(
+ "No flt_generate_hw_rule CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].flt_generate_eq) {
+ IPAHAL_ERR(
+ "No flt_generate_eq CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].rt_parse_hw_rule) {
+ IPAHAL_ERR(
+ "No rt_parse_hw_rule CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].flt_parse_hw_rule) {
+ IPAHAL_ERR(
+ "No flt_parse_hw_rule CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ }
+ }
+
+ mem = &ipahal_ctx->empty_fltrt_tbl;
+
+	/* Set up an empty table in system memory; this will
+ * be used, for example, to delete a rt tbl safely
+ */
+ mem->size = ipahal_fltrt_objs[ipa_hw_type].tbl_width;
+ mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAHAL_ERR("DMA buff alloc fail %d bytes for empty tbl\n",
+ mem->size);
+ return -ENOMEM;
+ }
+
+ if (mem->phys_base &
+ ipahal_fltrt_objs[ipa_hw_type].sysaddr_alignment) {
+ IPAHAL_ERR("Empty table buf is not address aligned 0x%pad\n",
+ &mem->phys_base);
+ rc = -EFAULT;
+ goto clear_empty_tbl;
+ }
+
+ memset(mem->base, 0, mem->size);
+	IPAHAL_DBG("empty table allocated in system memory\n");
+
+ return 0;
+
+clear_empty_tbl:
+ dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+ mem->phys_base);
+ return rc;
+}
+
+void ipahal_fltrt_destroy(void)
+{
+ IPAHAL_DBG("Entry\n");
+
+ if (ipahal_ctx && ipahal_ctx->empty_fltrt_tbl.base)
+ dma_free_coherent(ipahal_ctx->ipa_pdev,
+ ipahal_ctx->empty_fltrt_tbl.size,
+ ipahal_ctx->empty_fltrt_tbl.base,
+ ipahal_ctx->empty_fltrt_tbl.phys_base);
+}
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void)
+{
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].tbl_hdr_width;
+}
+
+/* Get the H/W local table (SRAM) address alignment
+ * Table headers reference local tables via offsets in SRAM.
+ * This function returns the alignment of the offset that IPA expects
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void)
+{
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].lcladdr_alignment;
+}
+
+/*
+ * Rule priority is used to order rules in the integrated table,
+ * which consists of hashable and non-hashable parts. A rule with
+ * max priority is used as soon as IPA scans it; IPA will not look
+ * for further matches.
+ */
+int ipahal_get_rule_max_priority(void)
+{
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_max_prio;
+}
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!prio) {
+ IPAHAL_ERR("Invalid Input\n");
+ return -EINVAL;
+ }
+
+	/* Priority logic is reversed: priority 0 is considered max priority */
+ if (*prio > obj->rule_min_prio || *prio < obj->rule_max_prio) {
+ IPAHAL_ERR("Invalid given priority %d\n", *prio);
+ return -EINVAL;
+ }
+
+ *prio += 1;
+
+ if (*prio > obj->rule_min_prio) {
+ IPAHAL_ERR("Cannot decrease priority. Already on min\n");
+ *prio -= 1;
+ return -EFAULT;
+ }
+
+ return 0;
+}
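+
+/*
+ * Illustrative usage (a sketch; "prio" is hypothetical): start from the
+ * max priority and allocate the next lower one per rule.
+ *
+ *	int prio = ipahal_get_rule_max_priority();
+ *
+ *	if (ipahal_rule_decrease_priority(&prio))
+ *		... no lower priority available ...
+ *
+ * Since the logic is reversed, lower priority means a numerically larger
+ * value; with the IPAv3.0 values this walks from 0 towards 1023.
+ */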
+
+/* Does the given ID represent a rule miss?
+ * The rule-miss ID is always the max ID possible in the bit pattern
+ */
+bool ipahal_is_rule_miss_id(u32 id)
+{
+ return (id ==
+ ((1U << ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len)
+ -1));
+}
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void)
+{
+ return BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len - 1);
+}
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void)
+{
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].low_rule_id;
+}
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ * Creates a routing header buffer for the given number of tables.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables; each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, struct ipa_mem_buffer *mem)
+{
+ int i;
+ u64 addr;
+ struct ipahal_fltrt_obj *obj;
+
+ IPAHAL_DBG("Entry\n");
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!tbls_num || !nhash_hdr_size || !mem) {
+ IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+ tbls_num, nhash_hdr_size, mem);
+ return -EINVAL;
+ }
+ if (obj->support_hash && !hash_hdr_size) {
+ IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+ return -EINVAL;
+ }
+
+ if (nhash_hdr_size < (tbls_num * obj->tbl_hdr_width)) {
+		IPAHAL_ERR("Not enough space at non-hash hdr blk for all tbls\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ if (obj->support_hash &&
+ (hash_hdr_size < (tbls_num * obj->tbl_hdr_width))) {
+		IPAHAL_ERR("Not enough space at hash hdr blk for all tbls\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mem->size = tbls_num * obj->tbl_hdr_width;
+ mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
+ addr = obj->create_tbl_addr(true,
+ ipahal_ctx->empty_fltrt_tbl.phys_base);
+ for (i = 0; i < tbls_num; i++)
+ obj->write_val_to_hdr(addr,
+ mem->base + i * obj->tbl_hdr_width);
+
+ return 0;
+}
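+
+/*
+ * Illustrative usage (a sketch; the sizes are hypothetical): generating
+ * an empty routing header image for 4 tables.
+ *
+ *	struct ipa_mem_buffer hdr_mem;
+ *
+ *	if (ipahal_rt_generate_empty_img(4, hash_blk_sz, nhash_blk_sz,
+ *		&hdr_mem))
+ *		... handle error ...
+ *
+ * On success, hdr_mem holds 4 * tbl_hdr_width bytes, each entry pointing
+ * at the shared empty table in system memory.
+ */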
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ * Creates a filter header buffer for the given number of tables.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables; each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ * should be: bit0->EP0, bit1->EP1
+ * If bitmap is zero -> create tbl without bitmap entry
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem)
+{
+ int flt_spc;
+ u64 flt_bitmap;
+ int i;
+ u64 addr;
+ struct ipahal_fltrt_obj *obj;
+
+ IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap);
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!tbls_num || !nhash_hdr_size || !mem) {
+ IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+ tbls_num, nhash_hdr_size, mem);
+ return -EINVAL;
+ }
+ if (obj->support_hash && !hash_hdr_size) {
+ IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+ return -EINVAL;
+ }
+
+ if (obj->support_hash) {
+ flt_spc = hash_hdr_size;
+ /* bitmap word */
+ if (ep_bitmap)
+ flt_spc -= obj->tbl_hdr_width;
+ flt_spc /= obj->tbl_hdr_width;
+ if (tbls_num > flt_spc) {
+ IPAHAL_ERR("space for hash flt hdr is too small\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+ }
+
+ flt_spc = nhash_hdr_size;
+ /* bitmap word */
+ if (ep_bitmap)
+ flt_spc -= obj->tbl_hdr_width;
+ flt_spc /= obj->tbl_hdr_width;
+ if (tbls_num > flt_spc) {
+ IPAHAL_ERR("space for non-hash flt hdr is too small\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mem->size = tbls_num * obj->tbl_hdr_width;
+ if (ep_bitmap)
+ mem->size += obj->tbl_hdr_width;
+ mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
+ if (ep_bitmap) {
+ flt_bitmap = obj->create_flt_bitmap(ep_bitmap);
+ IPAHAL_DBG("flt bitmap 0x%llx\n", flt_bitmap);
+ obj->write_val_to_hdr(flt_bitmap, mem->base);
+ }
+
+ addr = obj->create_tbl_addr(true,
+ ipahal_ctx->empty_fltrt_tbl.phys_base);
+
+ if (ep_bitmap) {
+ for (i = 1; i <= tbls_num; i++)
+ obj->write_val_to_hdr(addr,
+ mem->base + i * obj->tbl_hdr_width);
+ } else {
+ for (i = 0; i < tbls_num; i++)
+ obj->write_val_to_hdr(addr,
+ mem->base + i * obj->tbl_hdr_width);
+ }
+
+ return 0;
+}
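+
+/*
+ * Illustrative header layout produced above when ep_bitmap is non-zero
+ * (a sketch; with IPAv3.0 each word is tbl_hdr_width = 8 bytes):
+ *
+ *	word 0:	flt bitmap, via create_flt_bitmap(ep_bitmap)
+ *	word 1:	tbl 0 addr -> empty sys table
+ *	...
+ *	word N:	tbl N-1 addr -> empty sys table
+ *
+ * Without a bitmap, the table entries start at word 0.
+ */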
+
+/*
+ * ipa_fltrt_alloc_init_tbl_hdr() - allocate and initialize buffers for
+ * flt/rt table headers to be filled into SRAM. Init each table to point
+ * to the empty system table
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_init_tbl_hdr(
+ struct ipahal_fltrt_alloc_imgs_params *params)
+{
+ u64 addr;
+ int i;
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!params) {
+ IPAHAL_ERR("Input error: params=%p\n", params);
+ return -EINVAL;
+ }
+
+ params->nhash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+ params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+ params->nhash_hdr.size,
+		&params->nhash_hdr.phys_base, GFP_KERNEL);
+	if (!params->nhash_hdr.base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ params->nhash_hdr.size);
+ goto nhash_alloc_fail;
+ }
+
+ if (obj->support_hash) {
+ params->hash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+ params->hash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+			params->hash_hdr.size, &params->hash_hdr.phys_base,
+ GFP_KERNEL);
+ if (!params->hash_hdr.base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ params->hash_hdr.size);
+ goto hash_alloc_fail;
+ }
+ }
+
+ addr = obj->create_tbl_addr(true,
+ ipahal_ctx->empty_fltrt_tbl.phys_base);
+ for (i = 0; i < params->tbls_num; i++) {
+ obj->write_val_to_hdr(addr,
+ params->nhash_hdr.base + i * obj->tbl_hdr_width);
+ if (obj->support_hash)
+ obj->write_val_to_hdr(addr,
+ params->hash_hdr.base +
+ i * obj->tbl_hdr_width);
+ }
+
+ return 0;
+
+hash_alloc_fail:
+	ipahal_free_dma_mem(&params->nhash_hdr);
+nhash_alloc_fail:
+ return -ENOMEM;
+}
+
+/*
+ * ipa_fltrt_alloc_lcl_bdy() - allocate and initialize buffers for
+ * local flt/rt table bodies to be filled into SRAM
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_lcl_bdy(
+ struct ipahal_fltrt_alloc_imgs_params *params)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ /* The HAL allocates larger sizes than the given effective ones
+ * for alignments and border indications
+ */
+ IPAHAL_DBG("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
+ params->total_sz_lcl_hash_tbls,
+ params->total_sz_lcl_nhash_tbls);
+
+ IPAHAL_DBG("lcl tbl bdy count: hash=%u nhash=%u\n",
+ params->num_lcl_hash_tbls,
+ params->num_lcl_nhash_tbls);
+
+	/* Adjust the sizes to account for the termination word
+ * and H/W local table start offset alignment
+ */
+ if (params->nhash_bdy.size) {
+ params->nhash_bdy.size = params->total_sz_lcl_nhash_tbls;
+ /* for table terminator */
+ params->nhash_bdy.size += obj->tbl_width *
+ params->num_lcl_nhash_tbls;
+ /* align the start of local rule-set */
+ params->nhash_bdy.size += obj->lcladdr_alignment *
+ params->num_lcl_nhash_tbls;
+ /* SRAM block size alignment */
+ params->nhash_bdy.size += obj->blk_sz_alignment;
+ params->nhash_bdy.size &= ~(obj->blk_sz_alignment);
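+		/*
+		 * e.g. (illustrative): with blk_sz_alignment = 127, a size
+		 * of 1000 becomes (1000 + 127) & ~127 = 1024, i.e. rounded
+		 * up to the next 128B block boundary.
+		 */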
+
+ IPAHAL_DBG("nhash lcl tbl bdy total h/w size = %u\n",
+ params->nhash_bdy.size);
+
+ params->nhash_bdy.base = dma_alloc_coherent(
+ ipahal_ctx->ipa_pdev, params->nhash_bdy.size,
+			&params->nhash_bdy.phys_base, GFP_KERNEL);
+ if (!params->nhash_bdy.base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ params->nhash_bdy.size);
+ return -ENOMEM;
+ }
+ memset(params->nhash_bdy.base, 0, params->nhash_bdy.size);
+ }
+
+ if (!obj->support_hash && params->hash_bdy.size) {
+ IPAHAL_ERR("No HAL Hash tbls support - Will be ignored\n");
+ WARN_ON(1);
+ }
+
+ if (obj->support_hash && params->hash_bdy.size) {
+ params->hash_bdy.size = params->total_sz_lcl_hash_tbls;
+ /* for table terminator */
+ params->hash_bdy.size += obj->tbl_width *
+ params->num_lcl_hash_tbls;
+ /* align the start of local rule-set */
+ params->hash_bdy.size += obj->lcladdr_alignment *
+ params->num_lcl_hash_tbls;
+ /* SRAM block size alignment */
+ params->hash_bdy.size += obj->blk_sz_alignment;
+ params->hash_bdy.size &= ~(obj->blk_sz_alignment);
+
+ IPAHAL_DBG("hash lcl tbl bdy total h/w size = %u\n",
+ params->hash_bdy.size);
+
+ params->hash_bdy.base = dma_alloc_coherent(
+ ipahal_ctx->ipa_pdev, params->hash_bdy.size,
+			&params->hash_bdy.phys_base, GFP_KERNEL);
+ if (!params->hash_bdy.base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ params->hash_bdy.size);
+ goto hash_bdy_fail;
+ }
+ memset(params->hash_bdy.base, 0, params->hash_bdy.size);
+ }
+
+ return 0;
+
+hash_bdy_fail:
+ if (params->nhash_bdy.size)
+		ipahal_free_dma_mem(&params->nhash_bdy);
+
+ return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ * Usually used during commit.
+ * Allocates header structures and inits them to point to the empty DDR table.
+ * Allocates body structures for local table bodies.
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+ struct ipahal_fltrt_alloc_imgs_params *params)
+{
+ IPAHAL_DBG("Entry\n");
+
+ /* Input validation */
+ if (!params) {
+ IPAHAL_ERR("Input err: no params\n");
+ return -EINVAL;
+ }
+ if (params->ipt >= IPA_IP_MAX) {
+ IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+ return -EINVAL;
+ }
+
+ if (ipa_fltrt_alloc_init_tbl_hdr(params)) {
+ IPAHAL_ERR("fail to alloc and init tbl hdr\n");
+ return -ENOMEM;
+ }
+
+ if (ipa_fltrt_alloc_lcl_bdy(params)) {
+ IPAHAL_ERR("fail to alloc tbl bodies\n");
+ goto bdy_alloc_fail;
+ }
+
+ return 0;
+
+bdy_alloc_fail:
+	ipahal_free_dma_mem(&params->nhash_hdr);
+ if (params->hash_hdr.size)
+		ipahal_free_dma_mem(&params->hash_hdr);
+ return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. IN: size holds the effective table size.
+ *	OUT: base/phys_base point to the allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ IPAHAL_DBG("Entry\n");
+
+ if (!tbl_mem) {
+ IPAHAL_ERR("Input err\n");
+ return -EINVAL;
+ }
+
+ if (!tbl_mem->size) {
+ IPAHAL_ERR("Input err: zero table size\n");
+ return -EINVAL;
+ }
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ /* add word for rule-set terminator */
+ tbl_mem->size += obj->tbl_width;
+
+ tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size,
+ &tbl_mem->phys_base, GFP_KERNEL);
+ if (!tbl_mem->base) {
+ IPAHAL_ERR("fail to alloc DMA buf of size %d\n",
+ tbl_mem->size);
+ return -ENOMEM;
+ }
+ if (tbl_mem->phys_base & obj->sysaddr_alignment) {
+ IPAHAL_ERR("sys rt tbl address is not aligned\n");
+ goto align_err;
+ }
+
+ memset(tbl_mem->base, 0, tbl_mem->size);
+
+ return 0;
+
+align_err:
+ ipahal_free_dma_mem(tbl_mem);
+ return -EPERM;
+}
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ * Given table addr/offset, adapt it to IPA H/W format and write it
+ * to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+ bool is_sys)
+{
+ struct ipahal_fltrt_obj *obj;
+ u64 hwaddr;
+ u8 *hdr;
+
+ IPAHAL_DBG("Entry\n");
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!addr || !hdr_base) {
+ IPAHAL_ERR("Input err: addr=0x%llx hdr_base=%p\n",
+ addr, hdr_base);
+ return -EINVAL;
+ }
+
+ hdr = (u8 *)hdr_base;
+ hdr += hdr_idx * obj->tbl_hdr_width;
+ hwaddr = obj->create_tbl_addr(is_sys, addr);
+ obj->write_val_to_hdr(hwaddr, hdr);
+
+ return 0;
+}
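+
+/*
+ * Illustrative usage (a sketch; names are hypothetical): pointing header
+ * entry 2 at a system-memory table allocated separately.
+ *
+ *	struct ipa_mem_buffer tbl_mem;
+ *
+ *	... ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem) ...
+ *	ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+ *		params->nhash_hdr.base, 2, true);
+ */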
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read its
+ * content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+ bool *is_sys)
+{
+ struct ipahal_fltrt_obj *obj;
+ u64 hwaddr;
+ u8 *hdr;
+
+ IPAHAL_DBG("Entry\n");
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!addr || !hdr_base || !is_sys) {
+ IPAHAL_ERR("Input err: addr=%p hdr_base=%p is_sys=%p\n",
+ addr, hdr_base, is_sys);
+ return -EINVAL;
+ }
+
+ hdr = (u8 *)hdr_base;
+ hdr += hdr_idx * obj->tbl_hdr_width;
+ hwaddr = *((u64 *)hdr);
+ obj->parse_tbl_addr(hwaddr, addr, is_sys);
+ return 0;
+}
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf)
+{
+ struct ipahal_fltrt_obj *obj;
+ u8 *tmp = NULL;
+ int rc;
+
+ IPAHAL_DBG("Entry\n");
+
+ if (!params || !hw_len) {
+ IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+ return -EINVAL;
+ }
+ if (!params->rule) {
+ IPAHAL_ERR("Input err: invalid rule\n");
+ return -EINVAL;
+ }
+ if (params->ipt >= IPA_IP_MAX) {
+ IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+ return -EINVAL;
+ }
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (buf == NULL) {
+ tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+ if (!tmp) {
+ IPAHAL_ERR("failed to alloc %u bytes\n",
+ obj->rule_buf_size);
+ return -ENOMEM;
+ }
+ buf = tmp;
+	} else if ((long)buf & obj->rule_start_alignment) {
+		IPAHAL_ERR("buff is not rule start aligned\n");
+		return -EPERM;
+	}
+
+ rc = ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_generate_hw_rule(
+ params, hw_len, buf);
+ if (!tmp && !rc) {
+ /* write the rule-set terminator */
+ memset(buf + *hw_len, 0, obj->tbl_width);
+ }
+
+ kfree(tmp);
+
+ return rc;
+}
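+
+/*
+ * Illustrative two-pass usage (a sketch; "gen_params" and "dst" are
+ * hypothetical): first call with buf == NULL to learn the rule size,
+ * then build the rule in place inside a properly aligned table buffer.
+ *
+ *	u32 len;
+ *
+ *	if (ipahal_rt_generate_hw_rule(&gen_params, &len, NULL))
+ *		... handle error ...
+ *	... reserve len bytes at an aligned offset dst ...
+ *	if (ipahal_rt_generate_hw_rule(&gen_params, &len, dst))
+ *		... handle error ...
+ */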
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf)
+{
+ struct ipahal_fltrt_obj *obj;
+ u8 *tmp = NULL;
+ int rc;
+
+ IPAHAL_DBG("Entry\n");
+
+ if (!params || !hw_len) {
+ IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+ return -EINVAL;
+ }
+ if (!params->rule) {
+ IPAHAL_ERR("Input err: invalid rule\n");
+ return -EINVAL;
+ }
+ if (params->ipt >= IPA_IP_MAX) {
+ IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+ return -EINVAL;
+ }
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (buf == NULL) {
+ tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+ if (!tmp) {
+ IPAHAL_ERR("failed to alloc %u bytes\n",
+ obj->rule_buf_size);
+ return -ENOMEM;
+ }
+ buf = tmp;
+	} else if ((long)buf & obj->rule_start_alignment) {
+		IPAHAL_ERR("buff is not rule start aligned\n");
+		return -EPERM;
+	}
+
+ rc = ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_hw_rule(
+ params, hw_len, buf);
+ if (!tmp && !rc) {
+ /* write the rule-set terminator */
+ memset(buf + *hw_len, 0, obj->tbl_width);
+ }
+
+ kfree(tmp);
+
+ return rc;
+}
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ * Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ * for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+ IPAHAL_DBG("Entry\n");
+
+ if (ipt >= IPA_IP_MAX) {
+ IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt);
+ return -EINVAL;
+ }
+
+ if (!attrib || !eq_atrb) {
+ IPAHAL_ERR("Input err: attrib=%p eq_atrb=%p\n",
+ attrib, eq_atrb);
+ return -EINVAL;
+ }
+
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_eq(ipt,
+ attrib, eq_atrb);
+}
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_rt_rule_entry *rule)
+{
+ IPAHAL_DBG("Entry\n");
+
+ if (!rule_addr || !rule) {
+ IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+ rule_addr, rule);
+ return -EINVAL;
+ }
+
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_parse_hw_rule(
+ rule_addr, rule);
+}
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_flt_rule_entry *rule)
+{
+ IPAHAL_DBG("Entry\n");
+
+ if (!rule_addr || !rule) {
+ IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+ rule_addr, rule);
+ return -EINVAL;
+ }
+
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_parse_hw_rule(
+ rule_addr, rule);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
new file mode 100644
index 0000000..ee2704d6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
@@ -0,0 +1,288 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_H_
+#define _IPAHAL_FLTRT_H_
+
+/*
+ * struct ipahal_fltrt_alloc_imgs_params - Params for tbls imgs allocations
+ * The allocation logic will allocate DMA memory representing the header.
+ * If the bodies are local (SRAM), the allocation will allocate
+ * DMA buffers that will contain the raw content of these local tables
+ * @ipt: IP version type
+ * @tbls_num: Number of tables to represent by the header
+ * @num_lcl_hash_tbls: Number of local (sram) hashable tables
+ * @num_lcl_nhash_tbls: Number of local (sram) non-hashable tables
+ * @total_sz_lcl_hash_tbls: Total size of local hashable tables
+ * @total_sz_lcl_nhash_tbls: Total size of local non-hashable tables
+ * @hash_hdr/nhash_hdr: OUT params for the header structures
+ * @hash_bdy/nhash_bdy: OUT params for the local body structures
+ */
+struct ipahal_fltrt_alloc_imgs_params {
+ enum ipa_ip_type ipt;
+ u32 tbls_num;
+ u32 num_lcl_hash_tbls;
+ u32 num_lcl_nhash_tbls;
+ u32 total_sz_lcl_hash_tbls;
+ u32 total_sz_lcl_nhash_tbls;
+
+ /* OUT PARAMS */
+ struct ipa_mem_buffer hash_hdr;
+ struct ipa_mem_buffer nhash_hdr;
+ struct ipa_mem_buffer hash_bdy;
+ struct ipa_mem_buffer nhash_bdy;
+};
+
+/*
+ * enum ipahal_rt_rule_hdr_type - Header type used in rt rules
+ * @IPAHAL_RT_RULE_HDR_NONE: No header is used
+ * @IPAHAL_RT_RULE_HDR_RAW: Raw header is used
+ * @IPAHAL_RT_RULE_HDR_PROC_CTX: Header Processing context is used
+ */
+enum ipahal_rt_rule_hdr_type {
+ IPAHAL_RT_RULE_HDR_NONE,
+ IPAHAL_RT_RULE_HDR_RAW,
+ IPAHAL_RT_RULE_HDR_PROC_CTX,
+};
+
+/*
+ * struct ipahal_rt_rule_gen_params - Params for generating rt rule
+ * @ipt: IP family version
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_type: Header type to be used
+ * @hdr_lcl: Is the header located in a local or system table?
+ * @hdr_ofst: Offset of the header in the header table
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_rt_rule_gen_params {
+ enum ipa_ip_type ipt;
+ int dst_pipe_idx;
+ enum ipahal_rt_rule_hdr_type hdr_type;
+ bool hdr_lcl;
+ u32 hdr_ofst;
+ u32 priority;
+ u32 id;
+ const struct ipa_rt_rule *rule;
+};
+
+/*
+ * struct ipahal_rt_rule_entry - Rt rule info parsed from H/W
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_lcl: Is the referenced header located in SRAM or system memory?
+ * @hdr_ofst: Offset of the header in the header table
+ * @hdr_type: Header type to be used
+ * @priority: Rule priority
+ * @retain_hdr: whether to add back the header removed by header removal
+ * @id: Rule ID
+ * @eq_attrib: Equations and their params in the rule
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_rt_rule_entry {
+ int dst_pipe_idx;
+ bool hdr_lcl;
+ u32 hdr_ofst;
+ enum ipahal_rt_rule_hdr_type hdr_type;
+ u32 priority;
+ bool retain_hdr;
+ u32 id;
+ struct ipa_ipfltri_rule_eq eq_attrib;
+ u32 rule_size;
+};
+
+/*
+ * struct ipahal_flt_rule_gen_params - Params for generating flt rule
+ * @ipt: IP family version
+ * @rt_tbl_idx: Routing table the rule points to
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_flt_rule_gen_params {
+ enum ipa_ip_type ipt;
+ u32 rt_tbl_idx;
+ u32 priority;
+ u32 id;
+ const struct ipa_flt_rule *rule;
+};
+
+/*
+ * struct ipahal_flt_rule_entry - Flt rule info parsed from H/W
+ * @rule: Rule info
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_flt_rule_entry {
+ struct ipa_flt_rule rule;
+ u32 priority;
+ u32 id;
+ u32 rule_size;
+};
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void);
+
+/* Get the H/W local table (SRAM) address alignment
+ * Table headers reference local tables via offsets in SRAM.
+ * This function returns the alignment of the offset that IPA expects
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void);
+
+/*
+ * Rule priority is used to order rules in the integrated table,
+ * which consists of hashable and non-hashable parts. A rule with
+ * max priority is used as soon as IPA scans it; IPA will not look
+ * for further matches.
+ */
+int ipahal_get_rule_max_priority(void);
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio);
+
+/* Does the given ID represent a rule miss? */
+bool ipahal_is_rule_miss_id(u32 id);
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void);
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void);
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ * Creates a routing header buffer for the given number of tables.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables; each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ * Creates a filter header buffer for the given number of tables.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables; each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ * should be: bit0->EP0, bit1->EP1
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ * Usually used during commit.
+ * Allocates header structures and inits them to point to the empty DDR table.
+ * Allocates body structures for local table bodies.
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+ struct ipahal_fltrt_alloc_imgs_params *params);
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. IN: size holds the effective table size.
+ *	OUT: base/phys_base point to the allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem);
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ * Given table addr/offset, adapt it to IPA H/W format and write it
+ * to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+ bool is_sys);
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read its
+ * content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+ bool *is_sys);
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ * Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ * for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_rt_rule_entry *rule);
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_flt_rule_entry *rule);
+
+#endif /* _IPAHAL_FLTRT_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
new file mode 100644
index 0000000..0c0637d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
@@ -0,0 +1,143 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_I_H_
+#define _IPAHAL_FLTRT_I_H_
+
+/*
+ * enum ipa_fltrt_equations - RULE equations
+ * These are names values to the equations that can be used
+ * The HAL layer holds mapping between these names and H/W
+ * presentation.
+ */
+enum ipa_fltrt_equations {
+ IPA_TOS_EQ,
+ IPA_PROTOCOL_EQ,
+ IPA_TC_EQ,
+ IPA_OFFSET_MEQ128_0,
+ IPA_OFFSET_MEQ128_1,
+ IPA_OFFSET_MEQ32_0,
+ IPA_OFFSET_MEQ32_1,
+ IPA_IHL_OFFSET_MEQ32_0,
+ IPA_IHL_OFFSET_MEQ32_1,
+ IPA_METADATA_COMPARE,
+ IPA_IHL_OFFSET_RANGE16_0,
+ IPA_IHL_OFFSET_RANGE16_1,
+ IPA_IHL_OFFSET_EQ_32,
+ IPA_IHL_OFFSET_EQ_16,
+ IPA_FL_EQ,
+ IPA_IS_FRAG,
+ IPA_EQ_MAX,
+};
+
+/* Width and Alignment values for H/W structures.
+ * Specific for IPA version.
+ */
+#define IPA3_0_HW_TBL_SYSADDR_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_LCLADDR_ALIGNMENT (7)
+#define IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_WIDTH (8)
+#define IPA3_0_HW_TBL_HDR_WIDTH (8)
+#define IPA3_0_HW_TBL_ADDR_MASK (127)
+#define IPA3_0_HW_RULE_BUF_SIZE (256)
+#define IPA3_0_HW_RULE_START_ALIGNMENT (7)
+
+
+/*
+ * Rules Priority.
+ * Needed due to rules classification to hashable and non-hashable.
+ * Higher priority is lower in number. i.e. 0 is highest priority
+ */
+#define IPA3_0_RULE_MAX_PRIORITY (0)
+#define IPA3_0_RULE_MIN_PRIORITY (1023)
+
+/*
+ * RULE ID, bit length (e.g. 10 bits).
+ */
+#define IPA3_0_RULE_ID_BIT_LEN (10)
+#define IPA3_0_LOW_RULE_ID (1)
+
+/**
+ * struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule header properties
+ * @en_rule: enable rule - Equation bit fields
+ * @pipe_dest_idx: destination pipe index
+ * @system: Is the referenced header in lcl or sys memory
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ * header processing context table
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting of hashable and
+ * non-hashable parts
+ * @rsvd1: reserved bits
+ * @retain_hdr: added to add back to the packet the header removed
+ * as part of header removal. This will be done as part of
+ * header insertion block.
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd2: reserved bits
+ */
+struct ipa3_0_rt_rule_hw_hdr {
+ union {
+ u64 word;
+ struct {
+ u64 en_rule:16;
+ u64 pipe_dest_idx:5;
+ u64 system:1;
+ u64 hdr_offset:9;
+ u64 proc_ctx:1;
+ u64 priority:10;
+ u64 rsvd1:5;
+ u64 retain_hdr:1;
+ u64 rule_id:10;
+ u64 rsvd2:6;
+ } hdr;
+ } u;
+};
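+
+/*
+ * Illustrative note (a sketch based on ipa_rt_parse_hw_rule() in
+ * ipahal_fltrt.c): hdr_offset is stored in scaled units. With proc_ctx
+ * set, the byte offset is hdr_offset << 5 (32B units); otherwise it is
+ * hdr_offset << 2 (4B units). E.g. hdr_offset = 3 with proc_ctx = 1
+ * references byte offset 96.
+ */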
+
+/**
+ * struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: added to add back to the packet the header removed
+ * as part of header removal. This will be done as part of
+ * header insertion block.
+ * @rsvd1: reserved bits
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting of hashable and
+ * non-hashable parts
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd3: reserved bits
+ */
+struct ipa3_0_flt_rule_hw_hdr {
+ union {
+ u64 word;
+ struct {
+ u64 en_rule:16;
+ u64 action:5;
+ u64 rt_tbl_idx:5;
+ u64 retain_hdr:1;
+ u64 rsvd1:5;
+ u64 priority:10;
+ u64 rsvd2:6;
+ u64 rule_id:10;
+ u64 rsvd3:6;
+ } hdr;
+ } u;
+};
+
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type);
+void ipahal_fltrt_destroy(void);
+
+#endif /* _IPAHAL_FLTRT_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
new file mode 100644
index 0000000..4c4b666
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -0,0 +1,549 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_I_H_
+#define _IPAHAL_I_H_
+
+#include <linux/ipa.h>
+#include "../../ipa_common_i.h"
+
+#define IPAHAL_DRV_NAME "ipahal"
+
+#define IPAHAL_DBG(fmt, args...) \
+ do { \
+ pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+ ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPAHAL_DBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+ ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPAHAL_ERR(fmt, args...) \
+ do { \
+ pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+ ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
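+
+/*
+ * Usage sketch (hypothetical call site): format strings follow the usual
+ * pr_debug()/pr_err() rules; the macros additionally mirror the message
+ * into the IPC log buffers when those are available.
+ */
+static inline void example_ipahal_log(int ep_idx, int ret)
+{
+ IPAHAL_DBG("configuring ep %d\n", ep_idx);
+ if (ret)
+ IPAHAL_ERR("ep %d config failed ret=%d\n", ep_idx, ret);
+}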
+
+/*
+ * struct ipahal_context - HAL global context data
+ * @hw_type: IPA H/W type/version.
+ * @base: Base address to be used for accessing IPA memory. This is
+ * an I/O memory mapped address.
+ * @dent: Debugfs folder dir entry
+ * @ipa_pdev: IPA Platform Device. Will be used for DMA memory
+ * @empty_fltrt_tbl: Empty table to be used at tables init.
+ */
+struct ipahal_context {
+ enum ipa_hw_type hw_type;
+ void __iomem *base;
+ struct dentry *dent;
+ struct device *ipa_pdev;
+ struct ipa_mem_buffer empty_fltrt_tbl;
+};
+
+extern struct ipahal_context *ipahal_ctx;
+
+
+
+/* Immediate commands H/W structures */
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload
+ * in H/W format.
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_filter_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
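+
+/*
+ * Sanity-check sketch (not in the original patch): assuming the compiler
+ * packs the consecutive u64 bit-fields without gaps, the payload above is
+ * three 64-bit words, i.e. 24 bytes.
+ */
+static inline void example_v4_flt_init_size_check(void)
+{
+ BUILD_BUG_ON(sizeof(struct ipa_imm_cmd_hw_ip_v4_filter_init) != 24);
+}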
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload
+ * in H/W format.
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_filter_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload
+ * in H/W format.
+ * Inits IPv4 NAT block. Initializes the NAT table with its dimensions,
+ * location, cache address and other related parameters.
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
+ * table starts. IPv4 NAT rules that result in NAT collision are located
+ * in this table.
+ * @index_table_addr: Addr in sys/shared mem where index table, which points
+ * to NAT table starts
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ * table starts
+ * @table_index: For future support of multiple NAT tables
+ * @rsvd1: reserved
+ * @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem
+ * @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in
+ * sys or shared mem
+ * @index_table_addr_type: index_table_addr in sys or shared mem
+ * @index_table_expansion_addr_type: index_table_expansion_addr in
+ * sys or shared mem
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ * idx tbl (each)
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_imm_cmd_hw_ip_v4_nat_init {
+ u64 ipv4_rules_addr:64;
+ u64 ipv4_expansion_rules_addr:64;
+ u64 index_table_addr:64;
+ u64 index_table_expansion_addr:64;
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 ipv4_rules_addr_type:1;
+ u64 ipv4_expansion_rules_addr_type:1;
+ u64 index_table_addr_type:1;
+ u64 index_table_expansion_addr_type:1;
+ u64 size_base_tables:12;
+ u64 size_expansion_tables:10;
+ u64 rsvd2:2;
+ u64 public_ip_addr:32;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload
+ * in H/W format.
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_routing_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload
+ * in H/W format.
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_routing_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload
+ * in H/W format.
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ * @rsvd: reserved
+ */
+struct ipa_imm_cmd_hw_hdr_init_local {
+ u64 hdr_table_addr:64;
+ u64 size_hdr_table:12;
+ u64 hdr_addr:16;
+ u64 rsvd:4;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload
+ * in H/W format
+ * Perform DMA operation on NAT related mem addresses. Copy data into
+ * different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @rsvd1: reserved
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @rsvd2: reserved
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_nat_dma {
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 base_addr:2;
+ u64 rsvd2:2;
+ u64 offset:32;
+ u64 data:16;
+ u64 rsvd3:8;
+};
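+
+/*
+ * Sketch (hypothetical helper and values): a NAT_DMA command patching a
+ * 16-bit word at byte offset 'ofst' of NAT table 0. The meaning of
+ * base_addr == 0 (base NAT table) is an assumption here.
+ */
+static inline struct ipa_imm_cmd_hw_nat_dma example_nat_dma(u32 ofst, u16 data)
+{
+ struct ipa_imm_cmd_hw_nat_dma cmd = { 0 };
+
+ cmd.table_index = 0;
+ cmd.base_addr = 0; /* assumed: selects the base NAT table */
+ cmd.offset = ofst;
+ cmd.data = data;
+ return cmd;
+}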
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload
+ * in H/W format.
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipa_imm_cmd_hw_hdr_init_system {
+ u64 hdr_table_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload
+ * in H/W format.
+ * Configuration for a specific IP pkt. Shall be sent prior to the IP pkt
+ * data. The pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ * is enabled, this field will overwrite the rt rule)
+ * @rsv1: reserved
+ */
+struct ipa_imm_cmd_hw_ip_packet_init {
+ u64 destination_pipe_index:5;
+ u64 rsv1:59;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload
+ * in H/W format.
+ * Write value to register. Allows reg changes to be synced with data packet
+ * and other immediate command. Can be used to access the sram
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @pipeline_clear_options: options for pipeline to clear
+ * 0: HPS - no pkt inside HPS (not grp specific)
+ * 1: source group - The immediate cmd src grp does not use any pkt ctxs
+ * 2: Wait until no pkt reside inside IPA pipeline
+ * 3: reserved
+ * @rsvd: reserved - should be set to zero
+ */
+struct ipa_imm_cmd_hw_register_write {
+ u64 sw_rsvd:15;
+ u64 skip_pipeline_clear:1;
+ u64 offset:16;
+ u64 value:32;
+ u64 value_mask:32;
+ u64 pipeline_clear_options:2;
+ u64 rsvd:30;
+};
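+
+/*
+ * Sketch (hypothetical helper): a REGISTER_WRITE that updates only the
+ * bits in 'mask' at the given offset, after waiting for the entire IPA
+ * pipeline to drain (pipeline_clear_options == 2 per the doc above).
+ */
+static inline struct ipa_imm_cmd_hw_register_write
+example_reg_write(u16 ofst, u32 value, u32 mask)
+{
+ struct ipa_imm_cmd_hw_register_write cmd = { 0 };
+
+ cmd.skip_pipeline_clear = 0; /* wait until the pipeline is clear */
+ cmd.pipeline_clear_options = 2; /* no pkt inside the entire pipeline */
+ cmd.offset = ofst;
+ cmd.value = value;
+ cmd.value_mask = mask;
+ return cmd;
+}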
+
+/*
+ * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
+ * in H/W format.
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @direction: Read or write?
+ * 0: IPA write, Write to local address from system address
+ * 1: IPA read, Read from local address to system address
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @pipeline_clear_options: options for pipeline to clear
+ * 0: HPS - no pkt inside HPS (not grp specific)
+ * 1: source group - The immediate cmd src grp does not use any pkt ctxs
+ * 2: Wait until no pkt reside inside IPA pipeline
+ * 3: reserved
+ * @rsvd: reserved - should be set to zero
+ * @system_addr: Address in system memory
+ */
+struct ipa_imm_cmd_hw_dma_shared_mem {
+ u64 sw_rsvd:16;
+ u64 size:16;
+ u64 local_addr:16;
+ u64 direction:1;
+ u64 skip_pipeline_clear:1;
+ u64 pipeline_clear_options:2;
+ u64 rsvd:12;
+ u64 system_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_tag_status -
+ * IP_PACKET_TAG_STATUS command payload in H/W format.
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ * value that is passed back to SW inside Packet Status information.
+ * TAG info will be provided as part of Packet Status info generated for
+ * the next pkt transferred over the pipe.
+ * This immediate command must be followed by a packet in the same transfer.
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @tag: Tag that is provided back to SW
+ */
+struct ipa_imm_cmd_hw_ip_packet_tag_status {
+ u64 sw_rsvd:16;
+ u64 tag:48;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_task_32b_addr -
+ * IPA_DMA_TASK_32B_ADDR command payload in H/W format.
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ * multiple descriptors.
+ * The opcode is dynamic: it holds the number of buffers to process
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @cmplt: Complete flag: When asserted, IPA will interrupt SW once the entire
+ * DMA related data has been completely xfered to its destination.
+ * @eof: End Of Frame flag: When asserted, IPA will assert the EOT to the
+ * dest client. This is used for the aggr sequence
+ * @flsh: Flush flag: When asserted, pkt will go through the IPA blocks but
+ * will not be xfered to dest client but rather will be discarded
+ * @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors
+ * from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively
+ * servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ * only the first one needs to have this field set. It will be ignored
+ * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
+ * must contain this field (2 or more buffers) or EOT.
+ */
+struct ipa_imm_cmd_hw_dma_task_32b_addr {
+ u64 sw_rsvd:11;
+ u64 cmplt:1;
+ u64 eof:1;
+ u64 flsh:1;
+ u64 lock:1;
+ u64 unlock:1;
+ u64 size1:16;
+ u64 addr1:32;
+ u64 packet_size:16;
+};
+
+
+
+/* IPA Status packet H/W structures and info */
+
+/*
+ * struct ipa_status_pkt_hw - IPA status packet payload in H/W format.
+ * This structure describes the status packet H/W structure for the
+ * following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ * IPA_STATUS_SUSPENDED_PACKET.
+ * Other status types have different status packet structures.
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: (not bitmask) - the first exception that took place.
+ * In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask specifying on which H/W blocks the pkt was processed.
+ * @pkt_len: Pkt pyld len including hdr, including retained hdr if used. Does
+ * not include padding or checksum trailer len.
+ * @endp_src_idx: Source end point index.
+ * @rsvd1: reserved
+ * @endp_dest_idx: Destination end point index.
+ * Not valid in case of exception
+ * @rsvd2: reserved
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does the matching flt rule belong
+ * to a flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
+ * @flt_global: Global filter rule flag: Does the matching flt rule belong to
+ * the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ * specify to retain the header?
+ * @flt_rule_id: The ID of the matching filter rule. This info can be combined
+ * with endp_src_idx to locate the exact rule. ID=0x3FF reserved to specify
+ * flt miss. In case of miss, all flt info to be ignored
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ * a rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag.
+ * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
+ * @rt_rule_id: The ID of the matching rt rule. This info can be combined
+ * with rt_tbl_idx to locate the exact rule. ID=0x3FF reserved to specify
+ * rt miss. In case of miss, all rt info to be ignored
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @nat_type: Defines the type of the NAT operation:
+ * 00: No NAT
+ * 01: Source NAT
+ * 10: Destination NAT
+ * 11: Reserved
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ * taken from the table residing in local memory? (If not, then system mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ * @hw_specific: H/W specific reserved value
+ */
+struct ipa_pkt_status_hw {
+ u64 status_opcode:8;
+ u64 exception:8;
+ u64 status_mask:16;
+ u64 pkt_len:16;
+ u64 endp_src_idx:5;
+ u64 rsvd1:3;
+ u64 endp_dest_idx:5;
+ u64 rsvd2:3;
+ u64 metadata:32;
+ u64 flt_local:1;
+ u64 flt_hash:1;
+ u64 flt_global:1;
+ u64 flt_ret_hdr:1;
+ u64 flt_rule_id:10;
+ u64 rt_local:1;
+ u64 rt_hash:1;
+ u64 ucp:1;
+ u64 rt_tbl_idx:5;
+ u64 rt_rule_id:10;
+ u64 nat_hit:1;
+ u64 nat_entry_idx:13;
+ u64 nat_type:2;
+ u64 tag_info:48;
+ u64 seq_num:8;
+ u64 time_of_day_ctr:24;
+ u64 hdr_local:1;
+ u64 hdr_offset:10;
+ u64 frag_hit:1;
+ u64 frag_rule:4;
+ u64 hw_specific:16;
+};
+
+/* Size of H/W Packet Status */
+#define IPA3_0_PKT_STATUS_SIZE 32
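+
+/*
+ * Compile-time guard sketch (not in the original patch): ties the
+ * bit-field layout above to the documented status size, assuming the
+ * compiler packs consecutive u64 bit-fields without gaps.
+ */
+static inline void example_pkt_status_size_check(void)
+{
+ BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) != IPA3_0_PKT_STATUS_SIZE);
+}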
+
+/* Headers and processing context H/W structures and definitions */
+
+/* uCP command numbers */
+#define IPA_HDR_UCP_802_3_TO_802_3 6
+#define IPA_HDR_UCP_802_3_TO_ETHII 7
+#define IPA_HDR_UCP_ETHII_TO_802_3 8
+#define IPA_HDR_UCP_ETHII_TO_ETHII 9
+
+/* Processing context TLV type */
+#define IPA_PROC_CTX_TLV_TYPE_END 0
+#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
+#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_tlv -
+ * HW structure of IPA processing context header - TLV part
+ * @type: 0 - end type
+ * 1 - header addition type
+ * 3 - processing command type
+ * @length: number of bytes after tlv
+ * for type:
+ * 0 - needs to be 0
+ * 1 - header addition length
+ * 3 - number of 32B including type and length.
+ * @value: specific value for type
+ * for type:
+ * 0 - needs to be 0
+ * 1 - header length
+ * 3 - command ID (see IPA_HDR_UCP_* definitions)
+ */
+struct ipa_hw_hdr_proc_ctx_tlv {
+ u32 type:8;
+ u32 length:8;
+ u32 value:16;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_hdr_add -
+ * HW structure of IPA processing context - add header tlv
+ * @tlv: IPA processing context TLV
+ * @hdr_addr: processing context header address
+ */
+struct ipa_hw_hdr_proc_ctx_hdr_add {
+ struct ipa_hw_hdr_proc_ctx_tlv tlv;
+ u32 hdr_addr;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_seq -
+ * IPA processing context header - add header sequence
+ * @hdr_add: add header command
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_seq {
+ struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+ struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @cmd: tlv processing command (cmd.type must be 3)
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq {
+ struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+ struct ipa_hw_hdr_proc_ctx_tlv cmd;
+ struct ipa_hw_hdr_proc_ctx_tlv end;
+};
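+
+/*
+ * Sketch (hypothetical helper): populating an add-header sequence. Per
+ * the TLV doc above, value carries the header length for the add-header
+ * TLV; treating length as "one word (hdr_addr) follows" is an assumption.
+ */
+static inline void example_fill_add_hdr_seq(
+ struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx, u32 hdr_addr, u8 hdr_len)
+{
+ ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+ ctx->hdr_add.tlv.length = 1; /* assumption, see lead-in comment */
+ ctx->hdr_add.tlv.value = hdr_len;
+ ctx->hdr_add.hdr_addr = hdr_addr;
+ ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; /* must be 0 */
+ ctx->end.length = 0;
+ ctx->end.value = 0;
+}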
+
+#endif /* _IPAHAL_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
new file mode 100644
index 0000000..08decd8
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -0,0 +1,1541 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/ipa.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include "ipahal_i.h"
+#include "ipahal_reg.h"
+#include "ipahal_reg_i.h"
+
+static const char *ipareg_name_to_str[IPA_REG_MAX] = {
+ __stringify(IPA_ROUTE),
+ __stringify(IPA_IRQ_STTS_EE_n),
+ __stringify(IPA_IRQ_EN_EE_n),
+ __stringify(IPA_IRQ_CLR_EE_n),
+ __stringify(IPA_IRQ_SUSPEND_INFO_EE_n),
+ __stringify(IPA_SUSPEND_IRQ_EN_EE_n),
+ __stringify(IPA_SUSPEND_IRQ_CLR_EE_n),
+ __stringify(IPA_BCR),
+ __stringify(IPA_ENABLED_PIPES),
+ __stringify(IPA_COMP_SW_RESET),
+ __stringify(IPA_VERSION),
+ __stringify(IPA_TAG_TIMER),
+ __stringify(IPA_COMP_HW_VERSION),
+ __stringify(IPA_SPARE_REG_1),
+ __stringify(IPA_SPARE_REG_2),
+ __stringify(IPA_COMP_CFG),
+ __stringify(IPA_STATE_AGGR_ACTIVE),
+ __stringify(IPA_ENDP_INIT_HDR_n),
+ __stringify(IPA_ENDP_INIT_HDR_EXT_n),
+ __stringify(IPA_ENDP_INIT_AGGR_n),
+ __stringify(IPA_AGGR_FORCE_CLOSE),
+ __stringify(IPA_ENDP_INIT_ROUTE_n),
+ __stringify(IPA_ENDP_INIT_MODE_n),
+ __stringify(IPA_ENDP_INIT_NAT_n),
+ __stringify(IPA_ENDP_INIT_CTRL_n),
+ __stringify(IPA_ENDP_INIT_HOL_BLOCK_EN_n),
+ __stringify(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n),
+ __stringify(IPA_ENDP_INIT_DEAGGR_n),
+ __stringify(IPA_ENDP_INIT_SEQ_n),
+ __stringify(IPA_DEBUG_CNT_REG_n),
+ __stringify(IPA_ENDP_INIT_CFG_n),
+ __stringify(IPA_IRQ_EE_UC_n),
+ __stringify(IPA_ENDP_INIT_HDR_METADATA_MASK_n),
+ __stringify(IPA_ENDP_INIT_HDR_METADATA_n),
+ __stringify(IPA_ENDP_INIT_RSRC_GRP_n),
+ __stringify(IPA_SHARED_MEM_SIZE),
+ __stringify(IPA_SRAM_DIRECT_ACCESS_n),
+ __stringify(IPA_DEBUG_CNT_CTRL_n),
+ __stringify(IPA_UC_MAILBOX_m_n),
+ __stringify(IPA_FILT_ROUT_HASH_FLUSH),
+ __stringify(IPA_SINGLE_NDP_MODE),
+ __stringify(IPA_QCNCM),
+ __stringify(IPA_SYS_PKT_PROC_CNTXT_BASE),
+ __stringify(IPA_LOCAL_PKT_PROC_CNTXT_BASE),
+ __stringify(IPA_ENDP_STATUS_n),
+ __stringify(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n),
+ __stringify(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n),
+ __stringify(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n),
+ __stringify(IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n),
+ __stringify(IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n),
+ __stringify(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n),
+ __stringify(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n),
+ __stringify(IPA_DST_RSRC_GRP_45_RSRC_TYPE_n),
+ __stringify(IPA_DST_RSRC_GRP_67_RSRC_TYPE_n),
+ __stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0),
+ __stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1),
+ __stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0),
+ __stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1),
+ __stringify(IPA_QSB_MAX_WRITES),
+ __stringify(IPA_QSB_MAX_READS),
+ __stringify(IPA_TX_CFG),
+};
+
+static void ipareg_construct_dummy(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ IPAHAL_ERR("No construct function for %s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+}
+
+static void ipareg_parse_dummy(enum ipahal_reg_name reg,
+ void *fields, u32 val)
+{
+ IPAHAL_ERR("No parse function for %s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+}
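+
+/*
+ * For reference (illustration only; the real definitions live in
+ * ipahal_reg_i.h): the field helpers used throughout this file behave
+ * roughly as:
+ *
+ * IPA_SETFIELD_IN_REG(reg, val, shift, mask):
+ * reg |= ((val) << (shift)) & (mask)
+ * IPA_GETFIELD_FROM_REG(reg, shift, mask):
+ * (((reg) & (mask)) >> (shift))
+ */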
+
+static void ipareg_construct_rx_hps_clients_depth1(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rx_hps_clients *clients =
+ (struct ipahal_reg_rx_hps_clients *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rx_hps_clients *clients =
+ (struct ipahal_reg_rx_hps_clients *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(2));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(3));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0_v3_5(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rx_hps_clients *clients =
+ (struct ipahal_reg_rx_hps_clients *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(0));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(1));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(2));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(3));
+}
+
+static void ipareg_construct_rsrg_grp_xy(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rsrc_grp_cfg *grp =
+ (struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, grp->x_min,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK);
+ IPA_SETFIELD_IN_REG(*val, grp->x_max,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK);
+ IPA_SETFIELD_IN_REG(*val, grp->y_min,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK);
+ IPA_SETFIELD_IN_REG(*val, grp->y_max,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK);
+}
+
+static void ipareg_construct_rsrg_grp_xy_v3_5(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rsrc_grp_cfg *grp =
+ (struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, grp->x_min,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5);
+ IPA_SETFIELD_IN_REG(*val, grp->x_max,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5);
+
+ /* DST_23 register has only X fields at ipa V3_5 */
+ if (reg == IPA_DST_RSRC_GRP_23_RSRC_TYPE_n)
+ return;
+
+ IPA_SETFIELD_IN_REG(*val, grp->y_min,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5);
+ IPA_SETFIELD_IN_REG(*val, grp->y_max,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5);
+}
+
+static void ipareg_construct_hash_cfg_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_fltrt_hash_tuple *tuple =
+ (struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.src_id,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.src_ip_addr,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_ip_addr,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.src_port,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_port,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.protocol,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.meta_data,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->undefined1,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.src_id,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.src_ip_addr,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_ip_addr,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.src_port,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_port,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.protocol,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.meta_data,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->undefined2,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_parse_hash_cfg_n(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_fltrt_hash_tuple *tuple =
+ (struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+ tuple->flt.src_id =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+ tuple->flt.src_ip_addr =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+ tuple->flt.dst_ip_addr =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+ tuple->flt.src_port =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+ tuple->flt.dst_port =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+ tuple->flt.protocol =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+ tuple->flt.meta_data =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+ tuple->undefined1 =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+ tuple->rt.src_id =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+ tuple->rt.src_ip_addr =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+ tuple->rt.dst_ip_addr =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+ tuple->rt.src_port =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+ tuple->rt.dst_port =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+ tuple->rt.protocol =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+ tuple->rt.meta_data =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+ tuple->undefined2 =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_endp_status_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_ep_cfg_status *ep_status =
+ (struct ipahal_reg_ep_cfg_status *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_en,
+ IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_ep,
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_location,
+ IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
+}
+
+static void ipareg_construct_qcncm(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_qcncm *qcncm =
+ (struct ipahal_reg_qcncm *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, qcncm->mode_en ? 1 : 0,
+ IPA_QCNCM_MODE_EN_SHFT,
+ IPA_QCNCM_MODE_EN_BMSK);
+ IPA_SETFIELD_IN_REG(*val, qcncm->mode_val,
+ IPA_QCNCM_MODE_VAL_SHFT,
+ IPA_QCNCM_MODE_VAL_BMSK);
+ IPA_SETFIELD_IN_REG(*val, qcncm->undefined,
+ 0, IPA_QCNCM_MODE_VAL_BMSK);
+}
+
+static void ipareg_parse_qcncm(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_qcncm *qcncm =
+ (struct ipahal_reg_qcncm *)fields;
+
+ memset(qcncm, 0, sizeof(struct ipahal_reg_qcncm));
+ qcncm->mode_en = IPA_GETFIELD_FROM_REG(val,
+ IPA_QCNCM_MODE_EN_SHFT,
+ IPA_QCNCM_MODE_EN_BMSK);
+ qcncm->mode_val = IPA_GETFIELD_FROM_REG(val,
+ IPA_QCNCM_MODE_VAL_SHFT,
+ IPA_QCNCM_MODE_VAL_BMSK);
+ qcncm->undefined = IPA_GETFIELD_FROM_REG(val,
+ 0, IPA_QCNCM_UNDEFINED1_BMSK);
+ qcncm->undefined |= IPA_GETFIELD_FROM_REG(val,
+ 0, IPA_QCNCM_MODE_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_single_ndp_mode(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_single_ndp_mode *mode =
+ (struct ipahal_reg_single_ndp_mode *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, mode->single_ndp_en ? 1 : 0,
+ IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+ IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, mode->undefined,
+ IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+ IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+static void ipareg_parse_single_ndp_mode(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_single_ndp_mode *mode =
+ (struct ipahal_reg_single_ndp_mode *)fields;
+
+ memset(mode, 0, sizeof(struct ipahal_reg_single_ndp_mode));
+ mode->single_ndp_en = IPA_GETFIELD_FROM_REG(val,
+ IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+ IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+ mode->undefined = IPA_GETFIELD_FROM_REG(val,
+ IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+ IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+static void ipareg_construct_debug_cnt_ctrl_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_debug_cnt_ctrl *dbg_cnt_ctrl =
+ (struct ipahal_reg_debug_cnt_ctrl *)fields;
+ u8 type;
+
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->en ? 1 : 0,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK);
+
+ switch (dbg_cnt_ctrl->type) {
+ case DBG_CNT_TYPE_IPV4_FLTR:
+ type = 0x0;
+ if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+ IPAHAL_ERR("No FLT global rules\n");
+ WARN_ON(1);
+ }
+ break;
+ case DBG_CNT_TYPE_IPV4_ROUT:
+ type = 0x1;
+ break;
+ case DBG_CNT_TYPE_GENERAL:
+ type = 0x2;
+ break;
+ case DBG_CNT_TYPE_IPV6_FLTR:
+ type = 0x4;
+ if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+ IPAHAL_ERR("No FLT global rules\n");
+ WARN_ON(1);
+ }
+ break;
+ case DBG_CNT_TYPE_IPV6_ROUT:
+ type = 0x5;
+ break;
+ default:
+ IPAHAL_ERR("Invalid dbg_cnt_ctrl type (%d) for %s\n",
+ dbg_cnt_ctrl->type, ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+ }
+
+ IPA_SETFIELD_IN_REG(*val, type,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->product ? 1 : 0,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->src_pipe,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK);
+
+ if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK);
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK
+ );
+ } else {
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5);
+ }
+}
+
+static void ipareg_parse_shared_mem_size(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_shared_mem_size *smem_sz =
+ (struct ipahal_reg_shared_mem_size *)fields;
+
+ memset(smem_sz, 0, sizeof(struct ipahal_reg_shared_mem_size));
+ smem_sz->shared_mem_sz = IPA_GETFIELD_FROM_REG(val,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK);
+
+ smem_sz->shared_mem_baddr = IPA_GETFIELD_FROM_REG(val,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+ (struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+ IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT,
+ IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n_v3_5(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+ (struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+ IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5,
+ IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_metadata *metadata =
+ (struct ipa_ep_cfg_metadata *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, metadata->qmap_id,
+ IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT,
+ IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_mask_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_metadata_mask *metadata_mask =
+ (struct ipa_ep_cfg_metadata_mask *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, metadata_mask->metadata_mask,
+ IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
+ IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
+}
+
+static void ipareg_construct_endp_init_cfg_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_cfg *cfg =
+ (struct ipa_ep_cfg_cfg *)fields;
+ u32 cs_offload_en;
+
+ switch (cfg->cs_offload_en) {
+ case IPA_DISABLE_CS_OFFLOAD:
+ cs_offload_en = 0;
+ break;
+ case IPA_ENABLE_CS_OFFLOAD_UL:
+ cs_offload_en = 1;
+ break;
+ case IPA_ENABLE_CS_OFFLOAD_DL:
+ cs_offload_en = 2;
+ break;
+ default:
+ IPAHAL_ERR("Invalid cs_offload_en value for %s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+ }
+
+ IPA_SETFIELD_IN_REG(*val, cfg->frag_offload_en ? 1 : 0,
+ IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
+ IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
+ IPA_SETFIELD_IN_REG(*val, cs_offload_en,
+ IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
+ IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
+ IPA_SETFIELD_IN_REG(*val, cfg->cs_metadata_hdr_offset,
+ IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
+ IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
+ IPA_SETFIELD_IN_REG(*val, cfg->gen_qmb_master_sel,
+ IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT,
+ IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK);
+}
+
+static void ipareg_construct_endp_init_deaggr_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_deaggr *ep_deaggr =
+ (struct ipa_ep_cfg_deaggr *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_deaggr->deaggr_hdr_len,
+ IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_valid,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_location,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_deaggr->max_packet_len,
+ IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_en_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_holb *ep_holb =
+ (struct ipa_ep_cfg_holb *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_holb->en,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_timer_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_holb *ep_holb =
+ (struct ipa_ep_cfg_holb *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_holb->tmr_val,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK);
+}
+
+static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_ctrl *ep_ctrl =
+ (struct ipa_ep_cfg_ctrl *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_suspend,
+ IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT,
+ IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+ IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+ IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
+static void ipareg_construct_endp_init_nat_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_nat *ep_nat =
+ (struct ipa_ep_cfg_nat *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_nat->nat_en,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_mode_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_endp_init_mode *init_mode =
+ (struct ipahal_reg_endp_init_mode *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, init_mode->ep_mode.mode,
+ IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+ IPA_ENDP_INIT_MODE_n_MODE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, init_mode->dst_pipe_number,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+}
+
+static void ipareg_construct_endp_init_route_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_endp_init_route *ep_init_rt =
+ (struct ipahal_reg_endp_init_route *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_init_rt->route_table_index,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK);
+}
+
+static void ipareg_parse_endp_init_aggr_n(enum ipahal_reg_name reg,
+ void *fields, u32 val)
+{
+ struct ipa_ep_cfg_aggr *ep_aggr =
+ (struct ipa_ep_cfg_aggr *)fields;
+
+ memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr));
+
+ ep_aggr->aggr_en =
+ (((val & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT)
+ == IPA_ENABLE_AGGR);
+ ep_aggr->aggr =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT);
+ ep_aggr->aggr_byte_limit =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT);
+ ep_aggr->aggr_time_limit =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT);
+ ep_aggr->aggr_pkt_limit =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT);
+ ep_aggr->aggr_sw_eof_active =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT);
+ ep_aggr->aggr_hard_byte_limit_en =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK)
+ >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT);
+}
+
+static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_aggr *ep_aggr =
+ (struct ipa_ep_cfg_aggr *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_byte_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_time_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_pkt_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_sw_eof_active,
+ IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
+
+ /* At IPAv3 hard_byte_limit is not supported */
+ ep_aggr->aggr_hard_byte_limit_en = 0;
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_hard_byte_limit_en,
+ IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_hdr_ext *ep_hdr_ext;
+ u8 hdr_endianness;
+
+ ep_hdr_ext = (struct ipa_ep_cfg_hdr_ext *)fields;
+ hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_pad_to_alignment,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_offset,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_payload_len_inc_padding,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_valid,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, hdr_endianness,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_hdr *ep_hdr;
+
+ ep_hdr = (struct ipa_ep_cfg_hdr *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_metadata_reg_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2,
+ IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK);
+}
+
+static void ipareg_construct_route(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_route *route;
+
+ route = (struct ipahal_reg_route *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, route->route_dis,
+ IPA_ROUTE_ROUTE_DIS_SHFT,
+ IPA_ROUTE_ROUTE_DIS_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_def_pipe,
+ IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_table,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_ofst,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_frag_def_pipe,
+ IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_def_retain_hdr,
+ IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT,
+ IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK);
+}
+
+static void ipareg_construct_qsb_max_writes(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ int *qsb_max_writes = (int *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, qsb_max_writes[0],
+ IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT,
+ IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK);
+ IPA_SETFIELD_IN_REG(*val, qsb_max_writes[1],
+ IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT,
+ IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK);
+}
+
+static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ int *qsb_max_reads = (int *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, qsb_max_reads[0],
+ IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
+ IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
+ IPA_SETFIELD_IN_REG(*val, qsb_max_reads[1],
+ IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
+ IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
+}
+
+static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_tx_cfg *tx_cfg;
+
+ tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_disable,
+ IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
+ IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_disable,
+ IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
+ IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->prefetch_almost_empty_size,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+}
+
+/*
+ * struct ipahal_reg_obj - Register H/W information for specific IPA version
+ * @construct - CB to construct register value from abstracted structure
+ * @parse - CB to parse register value to abstracted structure
+ * @offset - register offset relative to base address
+ * @n_ofst - N parameterized register sub-offset
+ */
+struct ipahal_reg_obj {
+ void (*construct)(enum ipahal_reg_name reg, const void *fields,
+ u32 *val);
+ void (*parse)(enum ipahal_reg_name reg, void *fields,
+ u32 val);
+ u32 offset;
+ u32 n_ofst;
+};
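+
+/*
+ * Sketch of the version-inheritance fill described in the comment below
+ * (the actual loop lives in the init code; this helper is illustrative):
+ * an all-zero entry inherits from the previous HW version.
+ */
+static void example_inherit_reg_info(struct ipahal_reg_obj objs[][IPA_REG_MAX])
+{
+ int ver, reg;
+
+ for (ver = IPA_HW_v3_0 + 1; ver < IPA_HW_MAX; ver++)
+ for (reg = 0; reg < IPA_REG_MAX; reg++)
+ if (!objs[ver][reg].construct &&
+ !objs[ver][reg].parse &&
+ !objs[ver][reg].offset &&
+ !objs[ver][reg].n_ofst)
+ objs[ver][reg] = objs[ver - 1][reg];
+}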
+
+/*
+ * This table contains the info regarding each register for IPAv3 and later.
+ * Information like: offset and construct/parse functions.
+ * All register information for IPAv3 is statically defined below.
+ * If information is missing for some register on some IPA version,
+ * the init function will fill it with the information from the previous
+ * IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * If offset is -1, this means that the register is removed on the
+ * specific version.
+ */
+static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
+ /* IPAv3 */
+ [IPA_HW_v3_0][IPA_ROUTE] = {
+ ipareg_construct_route, ipareg_parse_dummy,
+ 0x00000048, 0},
+ [IPA_HW_v3_0][IPA_IRQ_STTS_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003008, 0x1000},
+ [IPA_HW_v3_0][IPA_IRQ_EN_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000300c, 0x1000},
+ [IPA_HW_v3_0][IPA_IRQ_CLR_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003010, 0x1000},
+ [IPA_HW_v3_0][IPA_IRQ_SUSPEND_INFO_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003098, 0x1000},
+ [IPA_HW_v3_0][IPA_BCR] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000001D0, 0},
+ [IPA_HW_v3_0][IPA_ENABLED_PIPES] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000038, 0},
+ [IPA_HW_v3_0][IPA_COMP_SW_RESET] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000040, 0},
+ [IPA_HW_v3_0][IPA_VERSION] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000034, 0},
+ [IPA_HW_v3_0][IPA_TAG_TIMER] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000060, 0 },
+ [IPA_HW_v3_0][IPA_COMP_HW_VERSION] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000030, 0},
+ [IPA_HW_v3_0][IPA_SPARE_REG_1] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00005090, 0},
+ [IPA_HW_v3_0][IPA_SPARE_REG_2] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00005094, 0},
+ [IPA_HW_v3_0][IPA_COMP_CFG] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000003C, 0},
+ [IPA_HW_v3_0][IPA_STATE_AGGR_ACTIVE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000010C, 0},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_n] = {
+ ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy,
+ 0x00000810, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_EXT_n] = {
+ ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy,
+ 0x00000814, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_AGGR_n] = {
+ ipareg_construct_endp_init_aggr_n,
+ ipareg_parse_endp_init_aggr_n,
+ 0x00000824, 0x70},
+ [IPA_HW_v3_0][IPA_AGGR_FORCE_CLOSE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000001EC, 0},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_ROUTE_n] = {
+ ipareg_construct_endp_init_route_n, ipareg_parse_dummy,
+ 0x00000828, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_MODE_n] = {
+ ipareg_construct_endp_init_mode_n, ipareg_parse_dummy,
+ 0x00000820, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_NAT_n] = {
+ ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
+ 0x0000080C, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = {
+ ipareg_construct_endp_init_ctrl_n, ipareg_parse_dummy,
+ 0x00000800, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = {
+ ipareg_construct_endp_init_hol_block_en_n,
+ ipareg_parse_dummy,
+ 0x0000082c, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = {
+ ipareg_construct_endp_init_hol_block_timer_n,
+ ipareg_parse_dummy,
+ 0x00000830, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_DEAGGR_n] = {
+ ipareg_construct_endp_init_deaggr_n,
+ ipareg_parse_dummy,
+ 0x00000834, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_SEQ_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000083C, 0x70},
+ [IPA_HW_v3_0][IPA_DEBUG_CNT_REG_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000600, 0x4},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_CFG_n] = {
+ ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy,
+ 0x00000808, 0x70},
+ [IPA_HW_v3_0][IPA_IRQ_EE_UC_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000301c, 0x1000},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = {
+ ipareg_construct_endp_init_hdr_metadata_mask_n,
+ ipareg_parse_dummy,
+ 0x00000818, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_n] = {
+ ipareg_construct_endp_init_hdr_metadata_n,
+ ipareg_parse_dummy,
+ 0x0000081c, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_RSRC_GRP_n] = {
+ ipareg_construct_endp_init_rsrc_grp_n,
+ ipareg_parse_dummy,
+ 0x00000838, 0x70},
+ [IPA_HW_v3_0][IPA_SHARED_MEM_SIZE] = {
+ ipareg_construct_dummy, ipareg_parse_shared_mem_size,
+ 0x00000054, 0},
+ [IPA_HW_v3_0][IPA_SRAM_DIRECT_ACCESS_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00007000, 0x4},
+ [IPA_HW_v3_0][IPA_DEBUG_CNT_CTRL_n] = {
+ ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy,
+ 0x00000640, 0x4},
+ [IPA_HW_v3_0][IPA_UC_MAILBOX_m_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00032000, 0x4},
+ [IPA_HW_v3_0][IPA_FILT_ROUT_HASH_FLUSH] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000090, 0},
+ [IPA_HW_v3_0][IPA_SINGLE_NDP_MODE] = {
+ ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
+ 0x00000068, 0},
+ [IPA_HW_v3_0][IPA_QCNCM] = {
+ ipareg_construct_qcncm, ipareg_parse_qcncm,
+ 0x00000064, 0},
+ [IPA_HW_v3_0][IPA_SYS_PKT_PROC_CNTXT_BASE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000001e0, 0},
+ [IPA_HW_v3_0][IPA_LOCAL_PKT_PROC_CNTXT_BASE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000001e8, 0},
+ [IPA_HW_v3_0][IPA_ENDP_STATUS_n] = {
+ ipareg_construct_endp_status_n, ipareg_parse_dummy,
+ 0x00000840, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = {
+ ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n,
+ 0x0000085C, 0x70},
+ [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000400, 0x20},
+ [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000404, 0x20},
+ [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000408, 0x20},
+ [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x0000040C, 0x20},
+ [IPA_HW_v3_0][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000500, 0x20},
+ [IPA_HW_v3_0][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000504, 0x20},
+ [IPA_HW_v3_0][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000508, 0x20},
+ [IPA_HW_v3_0][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x0000050c, 0x20},
+ [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+ ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+ 0x000023C4, 0},
+ [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+ ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+ 0x000023C8, 0},
+ [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+ ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+ 0x000023CC, 0},
+ [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+ ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+ 0x000023D0, 0},
+ [IPA_HW_v3_0][IPA_QSB_MAX_WRITES] = {
+ ipareg_construct_qsb_max_writes, ipareg_parse_dummy,
+ 0x00000074, 0},
+ [IPA_HW_v3_0][IPA_QSB_MAX_READS] = {
+ ipareg_construct_qsb_max_reads, ipareg_parse_dummy,
+ 0x00000078, 0},
+
+
+ /* IPAv3.1 */
+ [IPA_HW_v3_1][IPA_IRQ_SUSPEND_INFO_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003030, 0x1000},
+ [IPA_HW_v3_1][IPA_SUSPEND_IRQ_EN_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003034, 0x1000},
+ [IPA_HW_v3_1][IPA_SUSPEND_IRQ_CLR_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003038, 0x1000},
+
+
+ /* IPAv3.5 */
+ [IPA_HW_v3_5][IPA_TX_CFG] = {
+ ipareg_construct_tx_cfg, ipareg_parse_dummy,
+ 0x000001FC, 0},
+ [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+ 0x00000400, 0x20},
+ [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+ 0x00000404, 0x20},
+ [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+ 0x00000500, 0x20},
+ [IPA_HW_v3_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+ 0x00000504, 0x20},
+ [IPA_HW_v3_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_ENDP_INIT_RSRC_GRP_n] = {
+ ipareg_construct_endp_init_rsrc_grp_n_v3_5,
+ ipareg_parse_dummy,
+ 0x00000838, 0x70},
+ [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+ ipareg_construct_rx_hps_clients_depth0_v3_5,
+ ipareg_parse_dummy,
+ 0x000023C4, 0},
+ [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+ ipareg_construct_rx_hps_clients_depth0_v3_5,
+ ipareg_parse_dummy,
+ 0x000023CC, 0},
+ [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v3_5][IPA_SPARE_REG_1] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00002780, 0},
+ [IPA_HW_v3_5][IPA_SPARE_REG_2] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00002784, 0},
+};
+
+/*
+ * ipahal_reg_init() - Build the registers information table
+ * See ipahal_reg_objs[][] comments
+ *
+ * Note: Since global variables are zero-initialized, any register entry
+ * that was not explicitly overridden remains all-zero; this is how
+ * inherited entries are recognized.
+ */
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type)
+{
+ int i;
+ int j;
+ struct ipahal_reg_obj zero_obj;
+
+ IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+ IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+ return -EINVAL;
+ }
+
+ memset(&zero_obj, 0, sizeof(zero_obj));
+ for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+ for (j = 0; j < IPA_REG_MAX ; j++) {
+ if (!memcmp(&ipahal_reg_objs[i+1][j], &zero_obj,
+ sizeof(struct ipahal_reg_obj))) {
+ memcpy(&ipahal_reg_objs[i+1][j],
+ &ipahal_reg_objs[i][j],
+ sizeof(struct ipahal_reg_obj));
+ } else {
+ /*
+ * explicitly overridden register.
+ * Check validity
+ */
+ if (!ipahal_reg_objs[i+1][j].offset) {
+ IPAHAL_ERR(
+ "reg=%s with zero offset ipa_ver=%d\n",
+ ipahal_reg_name_str(j), i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_reg_objs[i+1][j].construct) {
+ IPAHAL_ERR(
+ "reg=%s with NULL construct func ipa_ver=%d\n",
+ ipahal_reg_name_str(j), i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_reg_objs[i+1][j].parse) {
+ IPAHAL_ERR(
+ "reg=%s with NULL parse func ipa_ver=%d\n",
+ ipahal_reg_name_str(j), i+1);
+ WARN_ON(1);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
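+
+/*
+ * Worked example (editorial sketch, derived from the table above): when
+ * running on IPA_HW_v3_1, the loop above fills every v3_1 slot that is
+ * still all-zero with the matching v3_0 entry. Only the three SUSPEND
+ * IRQ registers are overridden for v3.1 in the table, so a register
+ * such as IPA_ENABLED_PIPES keeps its v3_0 offset:
+ *
+ *	u32 ofst;
+ *
+ *	ipahal_reg_init(IPA_HW_v3_1);
+ *	ofst = ipahal_get_reg_ofst(IPA_ENABLED_PIPES);	// == 0x00000038
+ */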
+
+/*
+ * ipahal_reg_name_str() - returns the string that represents the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name)
+{
+ if (reg_name < 0 || reg_name >= IPA_REG_MAX) {
+ IPAHAL_ERR("requested name of invalid reg=%d\n", reg_name);
+ return "Invalid Register";
+ }
+
+ return ipareg_name_to_str[reg_name];
+}
+
+/*
+ * ipahal_read_reg_n() - Get the raw value of an n parameterized reg
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n)
+{
+ u32 offset;
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG_LOW("read from %s n=%u\n",
+ ipahal_reg_name_str(reg), n);
+
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Read access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ return ioread32(ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val)
+{
+ u32 offset;
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ return;
+ }
+
+ IPAHAL_DBG_LOW("write to %s m=%u n=%u val=%u\n",
+ ipahal_reg_name_str(reg), m, n, val);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Write access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+ }
+ /*
+ * Currently there is one register with m and n parameters
+ * IPA_UC_MAILBOX_m_n. The m value of it is 0x80.
+ * If more such registers will be added in the future,
+ * we can move the m parameter to the table above.
+ */
+ offset += 0x80 * m;
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ iowrite32(val, ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields)
+{
+ u32 val = 0;
+ u32 offset;
+
+ if (!fields) {
+ IPAHAL_ERR("Input error fields=%p\n", fields);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG_LOW("read from %s n=%u and parse it\n",
+ ipahal_reg_name_str(reg), n);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Read access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ val = ioread32(ipahal_ctx->base + offset);
+ ipahal_reg_objs[ipahal_ctx->hw_type][reg].parse(reg, fields, val);
+
+ return val;
+}
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+ const void *fields)
+{
+ u32 val = 0;
+ u32 offset;
+
+ if (!fields) {
+ IPAHAL_ERR("Input error fields=%p\n", fields);
+ return;
+ }
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ return;
+ }
+
+ IPAHAL_DBG_LOW("write to %s n=%u after constructing it\n",
+ ipahal_reg_name_str(reg), n);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Write access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+ }
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ ipahal_reg_objs[ipahal_ctx->hw_type][reg].construct(reg, fields, &val);
+
+ iowrite32(val, ipahal_ctx->base + offset);
+}
+
+/*
+ * Get the offset of an m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n)
+{
+ u32 offset;
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG_LOW("get offset of %s m=%u n=%u\n",
+ ipahal_reg_name_str(reg), m, n);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ /*
+ * Currently there is one register with m and n parameters
+ * IPA_UC_MAILBOX_m_n. The m value of it is 0x80.
+ * If more such registers will be added in the future,
+ * we can move the m parameter to the table above.
+ */
+ offset += 0x80 * m;
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+
+ return offset;
+}
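+
+/*
+ * Worked example (editorial sketch): IPA_UC_MAILBOX_m_n sits at offset
+ * 0x00032000 with an n stride of 0x4 (see the table) and an m stride of
+ * 0x80 (see the comment above), so:
+ *
+ *	ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, 1, 2)
+ *		== 0x32000 + 0x80 * 1 + 0x4 * 2 == 0x32088
+ */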
+
+u32 ipahal_get_reg_base(void)
+{
+ return 0x00040000;
+}
+
+
+/*
+ * Specific functions
+ * These functions supply specific register values for specific operations
+ * that cannot be reached by generic functions.
+ * E.g. to disable aggregation, specific bits of the AGGR register must
+ * be written while the other bits are left untouched. Such an operation
+ * is too specific to be defined generically, so dedicated functions are
+ * provided for it.
+ */
+
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask)
+{
+ if (!valmask) {
+ IPAHAL_ERR("Input error\n");
+ return;
+ }
+
+ valmask->val = (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+ valmask->mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+
+ valmask->val |= ((0 & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT);
+ valmask->mask |= ((IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT));
+}
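+
+/*
+ * Usage sketch (editorial; the exact caller flow is an assumption, and
+ * ep_idx stands for the caller's endpoint index): the valmask is meant
+ * for a read-modify-write so that AGGR bits outside the mask stay
+ * untouched:
+ *
+ *	struct ipahal_reg_valmask vm;
+ *	u32 val;
+ *
+ *	ipahal_get_disable_aggr_valmask(&vm);
+ *	val = ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, ep_idx);
+ *	val = (val & ~vm.mask) | (vm.val & vm.mask);
+ *	ipahal_write_reg_n(IPA_ENDP_INIT_AGGR_n, ep_idx, val);
+ */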
+
+u32 ipahal_aggr_get_max_byte_limit(void)
+{
+ return
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT;
+}
+
+u32 ipahal_aggr_get_max_pkt_limit(void)
+{
+ return
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT;
+}
+
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+ struct ipahal_reg_valmask *valmask)
+{
+ u32 shft;
+ u32 bmsk;
+
+ if (!valmask) {
+ IPAHAL_ERR("Input error\n");
+ return;
+ }
+
+ /* start from a clean value; IPA_SETFIELD_IN_REG() ORs into it */
+ memset(valmask, 0, sizeof(*valmask));
+
+ if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+ shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
+ bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
+ } else {
+ shft =
+ IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5;
+ bmsk =
+ IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
+ }
+
+ IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
+ valmask->mask = bmsk << shft;
+}
+
+void ipahal_get_fltrt_hash_flush_valmask(
+ struct ipahal_reg_fltrt_hash_flush *flush,
+ struct ipahal_reg_valmask *valmask)
+{
+ if (!flush || !valmask) {
+ IPAHAL_ERR("Input error: flush=%p ; valmask=%p\n",
+ flush, valmask);
+ return;
+ }
+
+ memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
+ if (flush->v6_rt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT);
+ if (flush->v6_flt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT);
+ if (flush->v4_rt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT);
+ if (flush->v4_flt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT);
+
+ valmask->mask = valmask->val;
+}
+
+void ipahal_get_status_ep_valmask(int pipe_num,
+ struct ipahal_reg_valmask *valmask)
+{
+ if (!valmask) {
+ IPAHAL_ERR("Input error\n");
+ return;
+ }
+
+ valmask->val =
+ (pipe_num & IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+
+ valmask->mask =
+ IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
new file mode 100644
index 0000000..8fb9040
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -0,0 +1,449 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_H_
+#define _IPAHAL_REG_H_
+
+#include <linux/ipa.h>
+
+/*
+ * Register names
+ *
+ * NOTE: Any change to this enum requires a matching change to the
+ * ipareg_name_to_str array as well.
+ */
+enum ipahal_reg_name {
+ IPA_ROUTE,
+ IPA_IRQ_STTS_EE_n,
+ IPA_IRQ_EN_EE_n,
+ IPA_IRQ_CLR_EE_n,
+ IPA_IRQ_SUSPEND_INFO_EE_n,
+ IPA_SUSPEND_IRQ_EN_EE_n,
+ IPA_SUSPEND_IRQ_CLR_EE_n,
+ IPA_BCR,
+ IPA_ENABLED_PIPES,
+ IPA_COMP_SW_RESET,
+ IPA_VERSION,
+ IPA_TAG_TIMER,
+ IPA_COMP_HW_VERSION,
+ IPA_SPARE_REG_1,
+ IPA_SPARE_REG_2,
+ IPA_COMP_CFG,
+ IPA_STATE_AGGR_ACTIVE,
+ IPA_ENDP_INIT_HDR_n,
+ IPA_ENDP_INIT_HDR_EXT_n,
+ IPA_ENDP_INIT_AGGR_n,
+ IPA_AGGR_FORCE_CLOSE,
+ IPA_ENDP_INIT_ROUTE_n,
+ IPA_ENDP_INIT_MODE_n,
+ IPA_ENDP_INIT_NAT_n,
+ IPA_ENDP_INIT_CTRL_n,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+ IPA_ENDP_INIT_DEAGGR_n,
+ IPA_ENDP_INIT_SEQ_n,
+ IPA_DEBUG_CNT_REG_n,
+ IPA_ENDP_INIT_CFG_n,
+ IPA_IRQ_EE_UC_n,
+ IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+ IPA_ENDP_INIT_HDR_METADATA_n,
+ IPA_ENDP_INIT_RSRC_GRP_n,
+ IPA_SHARED_MEM_SIZE,
+ IPA_SRAM_DIRECT_ACCESS_n,
+ IPA_DEBUG_CNT_CTRL_n,
+ IPA_UC_MAILBOX_m_n,
+ IPA_FILT_ROUT_HASH_FLUSH,
+ IPA_SINGLE_NDP_MODE,
+ IPA_QCNCM,
+ IPA_SYS_PKT_PROC_CNTXT_BASE,
+ IPA_LOCAL_PKT_PROC_CNTXT_BASE,
+ IPA_ENDP_STATUS_n,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+ IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+ IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+ IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n,
+ IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+ IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+ IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+ IPA_DST_RSRC_GRP_67_RSRC_TYPE_n,
+ IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
+ IPA_RX_HPS_CLIENTS_MIN_DEPTH_1,
+ IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+ IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+ IPA_QSB_MAX_WRITES,
+ IPA_QSB_MAX_READS,
+ IPA_TX_CFG,
+ IPA_REG_MAX,
+};
+
+/*
+ * struct ipahal_reg_route - IPA route register
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ * @route_frag_def_pipe: Default pipe to route fragmented exception
+ * packets and frag new rule status, if the source pipe does not have
+ * a notification status pipe defined.
+ * @route_def_retain_hdr: default value of retain header, used
+ * when no rule was hit
+ */
+struct ipahal_reg_route {
+ u32 route_dis;
+ u32 route_def_pipe;
+ u32 route_def_hdr_table;
+ u32 route_def_hdr_ofst;
+ u8 route_frag_def_pipe;
+ u32 route_def_retain_hdr;
+};
+
+/*
+ * struct ipahal_reg_endp_init_route - IPA ENDP_INIT_ROUTE_n register
+ * @route_table_index: Default index of routing table (IPA Consumer).
+ */
+struct ipahal_reg_endp_init_route {
+ u32 route_table_index;
+};
+
+/*
+ * struct ipahal_reg_endp_init_rsrc_grp - IPA_ENDP_INIT_RSRC_GRP_n register
+ * @rsrc_grp: Index of group for this ENDP. If this ENDP is a source-ENDP,
+ * the index is for a source-resource-group. If it is a destination
+ * ENDP, the index is for a destination-resource-group.
+ */
+struct ipahal_reg_endp_init_rsrc_grp {
+ u32 rsrc_grp;
+};
+
+/*
+ * struct ipahal_reg_endp_init_mode - IPA ENDP_INIT_MODE_n register
+ * @dst_pipe_number: Destination output pipe that packets will be
+ * routed to. Valid for DMA mode only and for Input Pipes only
+ * (IPA Consumer)
+ * @ep_mode: Endpoint mode configuration
+ */
+struct ipahal_reg_endp_init_mode {
+ u32 dst_pipe_number;
+ struct ipa_ep_cfg_mode ep_mode;
+};
+
+/*
+ * struct ipahal_reg_shared_mem_size - IPA SHARED_MEM_SIZE register
+ * @shared_mem_sz: Available size [in 8Bytes] of SW partition within
+ * IPA shared memory.
+ * @shared_mem_baddr: Offset of SW partition within IPA
+ * shared memory [in 8Bytes]. To get the absolute address of the SW
+ * partition, add this offset to the IPA_SRAM_DIRECT_ACCESS_n baddr.
+ */
+struct ipahal_reg_shared_mem_size {
+ u32 shared_mem_sz;
+ u32 shared_mem_baddr;
+};
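+
+/*
+ * Worked example (editorial sketch; the 8-byte scaling is inferred from
+ * the field units documented above):
+ *
+ *	struct ipahal_reg_shared_mem_size mem;
+ *	u32 sw_ofst;
+ *
+ *	ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &mem);
+ *	sw_ofst = ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+ *		mem.shared_mem_baddr * 8;
+ */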
+
+/*
+ * struct ipahal_reg_ep_cfg_status - status configuration in IPA end-point
+ * @status_en: Determines if end point supports Status Indications. SW should
+ * set this bit in order to enable Statuses. Output Pipe - send
+ * Status indications only if bit is set. Input Pipe - forward Status
+ * indication to STATUS_ENDP only if bit is set. Valid for Input
+ * and Output Pipes (IPA Consumer and Producer)
+ * @status_ep: Statuses generated for this endpoint will be forwarded to the
+ * specified Status End Point. Status endpoint needs to be
+ * configured with STATUS_EN=1 Valid only for Input Pipes (IPA
+ * Consumer)
+ * @status_location: Location of PKT-STATUS on destination pipe.
+ * If set to 0 (default), PKT-STATUS will be appended before the packet
+ * for this endpoint. If set to 1, PKT-STATUS will be appended after the
+ * packet for this endpoint. Valid only for Output Pipes (IPA Producer)
+ */
+struct ipahal_reg_ep_cfg_status {
+ bool status_en;
+ u8 status_ep;
+ bool status_location;
+};
+
+/*
+ * struct ipahal_reg_hash_tuple - Hash tuple members for flt and rt
+ * each field tells whether that member is to be masked or not
+ * @src_id: pipe number for flt, table index for rt
+ * @src_ip_addr: IP source address
+ * @dst_ip_addr: IP destination address
+ * @src_port: L4 source port
+ * @dst_port: L4 destination port
+ * @protocol: IP protocol field
+ * @meta_data: packet meta-data
+ *
+ */
+struct ipahal_reg_hash_tuple {
+ /* src_id: pipe in flt, tbl index in rt */
+ bool src_id;
+ bool src_ip_addr;
+ bool dst_ip_addr;
+ bool src_port;
+ bool dst_port;
+ bool protocol;
+ bool meta_data;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_tuple - IPA hash tuple register
+ * @flt: Hash tuple info for filtering
+ * @rt: Hash tuple info for routing
+ * @undefinedX: Undefined/Unused bit fields set of the register
+ */
+struct ipahal_reg_fltrt_hash_tuple {
+ struct ipahal_reg_hash_tuple flt;
+ struct ipahal_reg_hash_tuple rt;
+ u32 undefined1;
+ u32 undefined2;
+};
+
+/*
+ * enum ipahal_reg_dbg_cnt_type - Debug Counter Type
+ * DBG_CNT_TYPE_IPV4_FLTR - Count IPv4 filtering rules
+ * DBG_CNT_TYPE_IPV4_ROUT - Count IPv4 routing rules
+ * DBG_CNT_TYPE_GENERAL - General counter
+ * DBG_CNT_TYPE_IPV6_FLTR - Count IPv6 filtering rules
+ * DBG_CNT_TYPE_IPV6_ROUT - Count IPv6 routing rules
+ */
+enum ipahal_reg_dbg_cnt_type {
+ DBG_CNT_TYPE_IPV4_FLTR,
+ DBG_CNT_TYPE_IPV4_ROUT,
+ DBG_CNT_TYPE_GENERAL,
+ DBG_CNT_TYPE_IPV6_FLTR,
+ DBG_CNT_TYPE_IPV6_ROUT,
+};
+
+/*
+ * struct ipahal_reg_debug_cnt_ctrl - IPA_DEBUG_CNT_CTRL_n register
+ * @en - Enable debug counter
+ * @type - Type of debug counting
+ * @product - False: count bytes; True: count packets
+ * @src_pipe - Specific pipe to match. If 0xFF, no need to match a
+ * specific pipe
+ * @rule_idx_pipe_rule - Global Rule or Pipe Rule. If pipe, indicated by
+ * src_pipe. Starting with IPA v3.5 Global Rule is not supported
+ * and this field is ignored.
+ * @rule_idx - Rule index. Irrelevant for type General
+ */
+struct ipahal_reg_debug_cnt_ctrl {
+ bool en;
+ enum ipahal_reg_dbg_cnt_type type;
+ bool product;
+ u8 src_pipe;
+ bool rule_idx_pipe_rule;
+ u16 rule_idx;
+};
+
+/*
+ * struct ipahal_reg_rsrc_grp_cfg - Min/Max values for two rsrc groups
+ * @x_min - first group min value
+ * @x_max - first group max value
+ * @y_min - second group min value
+ * @y_max - second group max value
+ */
+struct ipahal_reg_rsrc_grp_cfg {
+ u32 x_min;
+ u32 x_max;
+ u32 y_min;
+ u32 y_max;
+};
+
+/*
+ * struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients
+ * @client_minmax - Min or Max values. In case of depth 0, all 4 values
+ * are used. In case of depth 1, only the first 2 values are used
+ */
+struct ipahal_reg_rx_hps_clients {
+ u32 client_minmax[4];
+};
+
+/*
+ * struct ipahal_reg_valmask - value and mask pair for register fields
+ * A HAL client may require only the value and mask of certain
+ * register fields.
+ * @val - The value
+ * @mask - The mask of the value
+ */
+struct ipahal_reg_valmask {
+ u32 val;
+ u32 mask;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_flush - Flt/Rt flush configuration
+ * @v6_rt - Flush IPv6 Routing cache
+ * @v6_flt - Flush IPv6 Filtering cache
+ * @v4_rt - Flush IPv4 Routing cache
+ * @v4_flt - Flush IPv4 Filtering cache
+ */
+struct ipahal_reg_fltrt_hash_flush {
+ bool v6_rt;
+ bool v6_flt;
+ bool v4_rt;
+ bool v4_flt;
+};
+
+/*
+ * struct ipahal_reg_single_ndp_mode - IPA SINGLE_NDP_MODE register
+ * @single_ndp_en: When set to '1', IPA builds MBIM frames with up to 1
+ * NDP-header.
+ * @unused: undefined bits of the register
+ */
+struct ipahal_reg_single_ndp_mode {
+ bool single_ndp_en;
+ u32 undefined;
+};
+
+/*
+ * struct ipahal_reg_qcncm - IPA QCNCM register
+ * @mode_en: When QCNCM_MODE_EN=1, IPA will use QCNCM signature.
+ * @mode_val: Used only when QCNCM_MODE_EN=1 and sets SW Signature in
+ * the NDP header.
+ * @unused: undefined bits of the register
+ */
+struct ipahal_reg_qcncm {
+ bool mode_en;
+ u32 mode_val;
+ u32 undefined;
+};
+
+/*
+ * struct ipahal_reg_tx_cfg - IPA TX_CFG register
+ * @tx0_prefetch_disable: Disable prefetch on TX0
+ * @tx1_prefetch_disable: Disable prefetch on TX1
+ * @prefetch_almost_empty_size: Prefetch almost empty size
+ */
+struct ipahal_reg_tx_cfg {
+ bool tx0_prefetch_disable;
+ bool tx1_prefetch_disable;
+ u16 prefetch_almost_empty_size;
+};
+
+/*
+ * ipahal_reg_name_str() - returns the string that represents the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name);
+
+/*
+ * ipahal_read_reg_n() - Get the raw value of n parameterized reg
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n);
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val);
+
+/*
+ * ipahal_write_reg_n() - Write to n parameterized reg a raw value
+ */
+static inline void ipahal_write_reg_n(enum ipahal_reg_name reg,
+ u32 n, u32 val)
+{
+ ipahal_write_reg_mn(reg, 0, n, val);
+}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields);
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+ const void *fields);
+
+/*
+ * ipahal_read_reg() - Get the raw value of a reg
+ */
+static inline u32 ipahal_read_reg(enum ipahal_reg_name reg)
+{
+ return ipahal_read_reg_n(reg, 0);
+}
+
+/*
+ * ipahal_write_reg() - Write to reg a raw value
+ */
+static inline void ipahal_write_reg(enum ipahal_reg_name reg,
+ u32 val)
+{
+ ipahal_write_reg_mn(reg, 0, 0, val);
+}
+
+/*
+ * ipahal_read_reg_fields() - Get the parsed value of a reg
+ */
+static inline u32 ipahal_read_reg_fields(enum ipahal_reg_name reg, void *fields)
+{
+ return ipahal_read_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * ipahal_write_reg_fields() - Write to reg a parsed value
+ */
+static inline void ipahal_write_reg_fields(enum ipahal_reg_name reg,
+ const void *fields)
+{
+ ipahal_write_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * Get the offset of an m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n);
+
+/*
+ * Get the offset of an n parameterized register
+ */
+static inline u32 ipahal_get_reg_n_ofst(enum ipahal_reg_name reg, u32 n)
+{
+ return ipahal_get_reg_mn_ofst(reg, 0, n);
+}
+
+/*
+ * Get the offset of a register
+ */
+static inline u32 ipahal_get_reg_ofst(enum ipahal_reg_name reg)
+{
+ return ipahal_get_reg_mn_ofst(reg, 0, 0);
+}
+
+/*
+ * Get the register base address
+ */
+u32 ipahal_get_reg_base(void);
+
+/*
+ * Specific functions
+ * These functions supply specific register values for specific operations
+ * that cannot be reached by generic functions.
+ * E.g. to disable aggregation, specific bits of the AGGR register must
+ * be written while the other bits are left untouched. Such an operation
+ * is too specific to be defined generically, so dedicated functions are
+ * provided for it.
+ */
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask);
+u32 ipahal_aggr_get_max_byte_limit(void);
+u32 ipahal_aggr_get_max_pkt_limit(void);
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+ struct ipahal_reg_valmask *valmask);
+void ipahal_get_fltrt_hash_flush_valmask(
+ struct ipahal_reg_fltrt_hash_flush *flush,
+ struct ipahal_reg_valmask *valmask);
+void ipahal_get_status_ep_valmask(int pipe_num,
+ struct ipahal_reg_valmask *valmask);
+
+#endif /* _IPAHAL_REG_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
new file mode 100644
index 0000000..1606a2f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -0,0 +1,315 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_I_H_
+#define _IPAHAL_REG_I_H_
+
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
+
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
+ (reg |= ((val) << (shift)) & (mask))
+#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
+ (((reg) & (mask)) >> (shift))
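+
+/*
+ * Usage sketch (editorial): the BMSK values in this file are
+ * pre-shifted, so IPA_SETFIELD() shifts the value first and then masks
+ * it. E.g. building an IPA_ROUTE value field by field:
+ *
+ *	u32 reg = 0;
+ *
+ *	IPA_SETFIELD_IN_REG(reg, 1, IPA_ROUTE_ROUTE_DIS_SHFT,
+ *		IPA_ROUTE_ROUTE_DIS_BMSK);
+ *	IPA_SETFIELD_IN_REG(reg, 5, IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ *		IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);	// bits [5:1] = 5
+ */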
+
+
+/* IPA_ROUTE register */
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0X40
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
+
+/* IPA_ENDP_INIT_HDR_n register */
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
+
+/* IPA_ENDP_INIT_HDR_EXT_n register */
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00
+
+/* IPA_ENDP_INIT_AGGR_N register */
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK 0x1000000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x18
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x400000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+/* IPA_AGGR_FORCE_CLOSE register */
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0
+
+/* IPA_ENDP_INIT_ROUTE_n register */
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+/* IPA_ENDP_INIT_MODE_n register */
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_BMSK 0x40000000
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_SHFT 0x1e
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK 0x10000000
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT 0x1c
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+/* IPA_ENDP_INIT_NAT_n register */
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_CTRL_n register */
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
+
+/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HOL_BLOCK_TIMER_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0
+
+/* IPA_ENDP_INIT_DEAGGR_n register */
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+
+/* IPA_ENDP_INIT_SEQ_n register */
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
+
+/* IPA_DEBUG_CNT_REG_n register */
+#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_MAX 15
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
+
+/* IPA_ENDP_INIT_CFG_n register */
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK 0x100
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT 0x8
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_MASK_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
+
+/* IPA_ENDP_INIT_RSRC_GRP_n register */
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5 0x3
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5 0
+
+/* IPA_SHARED_MEM_SIZE register */
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT 0x10
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0xffff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0
+
+/* IPA_DEBUG_CNT_CTRL_n register */
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 0x10000000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5 0x1ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0
+
+/* IPA_FILT_ROUT_HASH_FLUSH register */
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT 12
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT 8
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT 4
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT 0
+
+/* IPA_SINGLE_NDP_MODE register */
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK 0xfffffffe
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT 0
+
+/* IPA_QCNCM register */
+#define IPA_QCNCM_MODE_UNDEFINED2_BMSK 0xf0000000
+#define IPA_QCNCM_MODE_UNDEFINED2_SHFT 0x1c
+#define IPA_QCNCM_MODE_VAL_BMSK 0xffffff0
+#define IPA_QCNCM_MODE_VAL_SHFT 0x4
+#define IPA_QCNCM_UNDEFINED1_BMSK 0xe
+#define IPA_QCNCM_UNDEFINED1_SHFT 0x1
+#define IPA_QCNCM_MODE_EN_BMSK 0x1
+#define IPA_QCNCM_MODE_EN_SHFT 0
+
+/* IPA_ENDP_STATUS_n register */
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+
+/* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT 7
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK 0xff80
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT 23
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK 0xff800000
+
+/* IPA_RSRC_GRP_XY_RSRC_TYPE_n register */
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5 0x3F000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5 0x3F0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5 0x3F00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0
+
+
+/* IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \
+ (0xF << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
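+
+/*
+ * Example (editorial): each client occupies its own byte lane, so for
+ * client n = 2 the pre-v3.5 mask is 0x7F << 16 == 0x007F0000 and the
+ * depth value for that client is placed at bit 16.
+ */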
+
+/* IPA_QSB_MAX_WRITES register */
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK (0xf)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT (0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK (0xf0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT (4)
+
+/* IPA_QSB_MAX_READS register */
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK (0xf)
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT (0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4)
+
+/* IPA_TX_CFG register */
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1)
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5 (0x2)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5 (1)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2)
+
+#endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
new file mode 100644
index 0000000..56ec538
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -0,0 +1,2960 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * WWAN Transport Network Driver.
+ */
+
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/pkt_sched.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include "ipa_qmi_service.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include <linux/ipa.h>
+#include <uapi/linux/net_map.h>
+
+#include "ipa_trace.h"
+
+#define WWAN_METADATA_SHFT 24
+#define WWAN_METADATA_MASK 0xFF000000
+#define WWAN_DATA_LEN 2000
+#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
+#define HEADROOM_FOR_QMAP 8 /* for mux header */
+#define TAILROOM 0 /* for padding by mux layer */
+#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
+#define UL_FILTER_RULE_HANDLE_START 69
+#define DEFAULT_OUTSTANDING_HIGH_CTL 96
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+
+#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+
+#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
+
+#define INVALID_MUX_ID 0xFF
+#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
+#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
+#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
+#define NAPI_WEIGHT 60
+
+#define IPA_NETDEV() \
+ ((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
+ rmnet_ipa3_ctx->wwan_priv->net : NULL)
+
+
+static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
+static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
+static void ipa3_wwan_msg_free_cb(void *, u32, u32);
+static void ipa3_rmnet_rx_cb(void *priv);
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget);
+
+static void ipa3_wake_tx_queue(struct work_struct *work);
+static DECLARE_WORK(ipa3_tx_wakequeue_work, ipa3_wake_tx_queue);
+
+static void tethering_stats_poll_queue(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
+ tethering_stats_poll_queue);
+
+enum ipa3_wwan_device_status {
+ WWAN_DEVICE_INACTIVE = 0,
+ WWAN_DEVICE_ACTIVE = 1
+};
+
+struct ipa3_rmnet_plat_drv_res {
+ bool ipa_rmnet_ssr;
+ bool ipa_loaduC;
+ bool ipa_advertise_sg_support;
+ bool ipa_napi_enable;
+};
+
+/**
+ * struct ipa3_wwan_private - WWAN private data
+ * @net: network interface struct implemented by this driver
+ * @stats: iface statistics
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high: number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets below which the TX
+ * queue is woken again
+ * @ch_id: channel id
+ * @lock: spinlock for mutual exclusion
+ * @device_status: holds device status
+ *
+ * WWAN private - holds all relevant info about WWAN driver
+ */
+struct ipa3_wwan_private {
+ struct net_device *net;
+ struct net_device_stats stats;
+ atomic_t outstanding_pkts;
+ int outstanding_high_ctl;
+ int outstanding_high;
+ int outstanding_low;
+ uint32_t ch_id;
+ spinlock_t lock;
+ struct completion resource_granted_completion;
+ enum ipa3_wwan_device_status device_status;
+ struct napi_struct napi;
+};
+
+struct rmnet_ipa3_context {
+ struct ipa3_wwan_private *wwan_priv;
+ struct ipa_sys_connect_params apps_to_ipa_ep_cfg;
+ struct ipa_sys_connect_params ipa_to_apps_ep_cfg;
+ u32 qmap_hdr_hdl;
+ u32 dflt_v4_wan_rt_hdl;
+ u32 dflt_v6_wan_rt_hdl;
+ struct ipa3_rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
+ int num_q6_rules;
+ int old_num_q6_rules;
+ int rmnet_index;
+ bool egress_set;
+ bool a7_ul_flt_set;
+ struct workqueue_struct *rm_q6_wq;
+ atomic_t is_initialized;
+ atomic_t is_ssr;
+ void *subsys_notify_handle;
+ u32 apps_to_ipa3_hdl;
+ u32 ipa3_to_apps_hdl;
+ struct mutex ipa_to_apps_pipe_handle_guard;
+};
+
+static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
+static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res;
+
+/**
+* ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+static int ipa3_setup_a7_qmap_hdr(void)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_hdr_add *hdr_entry;
+ u32 pyld_sz;
+ int ret;
+
+ /* install the basic exception header */
+ pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+ sizeof(struct ipa_hdr_add);
+ hdr = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!hdr) {
+ IPAWANERR("fail to alloc exception hdr\n");
+ return -ENOMEM;
+ }
+ hdr->num_hdrs = 1;
+ hdr->commit = 1;
+ hdr_entry = &hdr->hdr[0];
+
+ strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+
+ if (ipa3_add_hdr(hdr)) {
+ IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ if (hdr_entry->status) {
+ IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+ rmnet_ipa3_ctx->qmap_hdr_hdl = hdr_entry->hdr_hdl;
+
+ ret = 0;
+bail:
+ kfree(hdr);
+ return ret;
+}
+
+static void ipa3_del_a7_qmap_hdr(void)
+{
+ struct ipa_ioc_del_hdr *del_hdr;
+ struct ipa_hdr_del *hdl_entry;
+ u32 pyld_sz;
+ int ret;
+
+ pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+ sizeof(struct ipa_hdr_del);
+ del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!del_hdr) {
+ IPAWANERR("fail to alloc exception hdr_del\n");
+ return;
+ }
+
+ del_hdr->commit = 1;
+ del_hdr->num_hdls = 1;
+ hdl_entry = &del_hdr->hdl[0];
+ hdl_entry->hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
+
+ ret = ipa3_del_hdr(del_hdr);
+ if (ret || hdl_entry->status)
+ IPAWANERR("ipa3_del_hdr failed\n");
+ else
+ IPAWANDBG("hdrs deletion done\n");
+
+ rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
+ kfree(del_hdr);
+}
+
+static void ipa3_del_qmap_hdr(uint32_t hdr_hdl)
+{
+ struct ipa_ioc_del_hdr *del_hdr;
+ struct ipa_hdr_del *hdl_entry;
+ u32 pyld_sz;
+ int ret;
+
+ if (hdr_hdl == 0) {
+ IPAWANERR("Invalid hdr_hdl provided\n");
+ return;
+ }
+
+ pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+ sizeof(struct ipa_hdr_del);
+ del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!del_hdr) {
+ IPAWANERR("fail to alloc exception hdr_del\n");
+ return;
+ }
+
+ del_hdr->commit = 1;
+ del_hdr->num_hdls = 1;
+ hdl_entry = &del_hdr->hdl[0];
+ hdl_entry->hdl = hdr_hdl;
+
+ ret = ipa3_del_hdr(del_hdr);
+ if (ret || hdl_entry->status)
+ IPAWANERR("ipa3_del_hdr failed\n");
+ else
+ IPAWANDBG("header deletion done\n");
+
+ rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
+ kfree(del_hdr);
+}
+
+static void ipa3_del_mux_qmap_hdrs(void)
+{
+ int index;
+
+ for (index = 0; index < rmnet_ipa3_ctx->rmnet_index; index++) {
+ ipa3_del_qmap_hdr(rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
+ rmnet_ipa3_ctx->mux_channel[index].hdr_hdl = 0;
+ }
+}
+
+static int ipa3_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_hdr_add *hdr_entry;
+ char hdr_name[IPA_RESOURCE_NAME_MAX];
+ u32 pyld_sz;
+ int ret;
+
+ pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+ sizeof(struct ipa_hdr_add);
+ hdr = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!hdr) {
+ IPAWANERR("fail to alloc exception hdr\n");
+ return -ENOMEM;
+ }
+ hdr->num_hdrs = 1;
+ hdr->commit = 1;
+ hdr_entry = &hdr->hdr[0];
+
+ snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF,
+ mux_id);
+ strlcpy(hdr_entry->name, hdr_name,
+ IPA_RESOURCE_NAME_MAX);
+
+ hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+ hdr_entry->hdr[1] = (uint8_t) mux_id;
+ IPAWANDBG("header (%s) with mux-id: (%d)\n",
+ hdr_name,
+ hdr_entry->hdr[1]);
+ if (ipa3_add_hdr(hdr)) {
+ IPAWANERR("fail to add IPA_QMAP hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ if (hdr_entry->status) {
+ IPAWANERR("fail to add IPA_QMAP hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+ *hdr_hdl = hdr_entry->hdr_hdl;
+bail:
+ kfree(hdr);
+ return ret;
+}
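+
+/*
+ * Note (editorial): the QMAP header template built above is 4 bytes
+ * long (IPA_QMAP_HEADER_LENGTH) and carries the mux-id in its second
+ * byte (hdr[1]); the remaining template bytes are left zero and
+ * correspond to the other QMAP fields (control/pad and packet length).
+ */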
+
+/**
+* ipa3_setup_dflt_wan_rt_tables() - Setup default wan routing tables
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+static int ipa3_setup_dflt_wan_rt_tables(void)
+{
+ struct ipa_ioc_add_rt_rule *rt_rule;
+ struct ipa_rt_rule_add *rt_rule_entry;
+
+ rt_rule =
+ kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+ sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+ if (!rt_rule) {
+ IPAWANERR("fail to alloc mem\n");
+ return -ENOMEM;
+ }
+ /* setup a default v4 route to point to Apps */
+ rt_rule->num_rules = 1;
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v4;
+ strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
+ IPA_RESOURCE_NAME_MAX);
+
+ rt_rule_entry = &rt_rule->rules[0];
+ rt_rule_entry->at_rear = 1;
+ rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
+ rt_rule_entry->rule.hdr_hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
+
+ if (ipa3_add_rt_rule(rt_rule)) {
+ IPAWANERR("fail to add dflt_wan v4 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+
+ IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+ /* setup a default v6 route to point to A5 */
+ rt_rule->ip = IPA_IP_v6;
+ if (ipa3_add_rt_rule(rt_rule)) {
+ IPAWANERR("fail to add dflt_wan v6 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+ kfree(rt_rule);
+ return 0;
+}
+
+static void ipa3_del_dflt_wan_rt_tables(void)
+{
+ struct ipa_ioc_del_rt_rule *rt_rule;
+ struct ipa_rt_rule_del *rt_rule_entry;
+ int len;
+
+ len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
+ sizeof(struct ipa_rt_rule_del);
+ rt_rule = kzalloc(len, GFP_KERNEL);
+ if (!rt_rule) {
+ IPAWANERR("unable to allocate memory for del route rule\n");
+ return;
+ }
+
+ memset(rt_rule, 0, len);
+ rt_rule->commit = 1;
+ rt_rule->num_hdls = 1;
+ rt_rule->ip = IPA_IP_v4;
+
+ rt_rule_entry = &rt_rule->hdl[0];
+ rt_rule_entry->status = -1;
+ rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl;
+
+ IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+ rt_rule_entry->hdl, IPA_IP_v4);
+ if (ipa3_del_rt_rule(rt_rule) ||
+ (rt_rule_entry->status)) {
+ IPAWANERR("Routing rule deletion failed!\n");
+ }
+
+ rt_rule->ip = IPA_IP_v6;
+ rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl;
+ IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+ rt_rule_entry->hdl, IPA_IP_v6);
+ if (ipa3_del_rt_rule(rt_rule) ||
+ (rt_rule_entry->status)) {
+ IPAWANERR("Routing rule deletion failed!\n");
+ }
+
+ kfree(rt_rule);
+}
+
+int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+ *rule_req)
+{
+ int i, j;
+
+ if (rule_req->filter_spec_ex_list_valid == true) {
+ rmnet_ipa3_ctx->num_q6_rules =
+ rule_req->filter_spec_ex_list_len;
+ IPAWANDBG("Received (%d) install_flt_req\n",
+ rmnet_ipa3_ctx->num_q6_rules);
+ } else {
+ rmnet_ipa3_ctx->num_q6_rules = 0;
+ IPAWANERR("got no UL rules from modem\n");
+ return -EINVAL;
+ }
+
+ /* copy UL filter rules from the modem */
+ for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+ /* check that the rules do not exceed the cache */
+ if (i == MAX_NUM_Q6_RULE) {
+ IPAWANERR("reached max cache (%d), total rules (%d)\n",
+ MAX_NUM_Q6_RULE,
+ rmnet_ipa3_ctx->num_q6_rules);
+ goto failure;
+ }
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].ip =
+ rule_req->filter_spec_ex_list[i].ip_type;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].action =
+ rule_req->filter_spec_ex_list[i].filter_action;
+ if (rule_req->filter_spec_ex_list[i].
+ is_routing_table_index_valid == true)
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
+ rule_req->filter_spec_ex_list[i].route_table_index;
+ if (rule_req->filter_spec_ex_list[i].is_mux_id_valid == true)
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].mux_id =
+ rule_req->filter_spec_ex_list[i].mux_id;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id =
+ rule_req->filter_spec_ex_list[i].rule_id;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable =
+ rule_req->filter_spec_ex_list[i].is_rule_hashable;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
+ rule_req->filter_spec_ex_list[i].filter_rule.
+ rule_eq_bitmap;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
+ rule_req->filter_spec_ex_list[i].filter_rule.
+ tos_eq_present;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
+ rule_req->filter_spec_ex_list[i].filter_rule.tos_eq;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ protocol_eq_present = rule_req->filter_spec_ex_list[i].
+ filter_rule.protocol_eq_present;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
+ rule_req->filter_spec_ex_list[i].filter_rule.
+ protocol_eq;
+
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_ihl_offset_range_16 =
+ rule_req->filter_spec_ex_list[i].
+ filter_rule.num_ihl_offset_range_16;
+ for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_ihl_offset_range_16; j++) {
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_range_16[j].offset = rule_req->
+ filter_spec_ex_list[i].filter_rule.
+ ihl_offset_range_16[j].offset;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_range_16[j].range_low = rule_req->
+ filter_spec_ex_list[i].filter_rule.
+ ihl_offset_range_16[j].range_low;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_range_16[j].range_high = rule_req->
+ filter_spec_ex_list[i].filter_rule.
+ ihl_offset_range_16[j].range_high;
+ }
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
+ rule_req->filter_spec_ex_list[i].filter_rule.
+ num_offset_meq_32;
+ for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_offset_meq_32; j++) {
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_32[j].offset =
+ rule_req->filter_spec_ex_list[i].
+ filter_rule.offset_meq_32[j].offset;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_32[j].mask =
+ rule_req->filter_spec_ex_list[i].
+ filter_rule.offset_meq_32[j].mask;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_32[j].value =
+ rule_req->filter_spec_ex_list[i].
+ filter_rule.offset_meq_32[j].value;
+ }
+
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
+ rule_req->filter_spec_ex_list[i].
+ filter_rule.tc_eq_present;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
+ rule_req->filter_spec_ex_list[i].filter_rule.tc_eq;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
+ rule_req->filter_spec_ex_list[i].filter_rule.
+ flow_eq_present;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
+ rule_req->filter_spec_ex_list[i].filter_rule.flow_eq;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_16_present = rule_req->filter_spec_ex_list[i].
+ filter_rule.ihl_offset_eq_16_present;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_16.offset = rule_req->filter_spec_ex_list[i].
+ filter_rule.ihl_offset_eq_16.offset;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_16.value = rule_req->filter_spec_ex_list[i].
+ filter_rule.ihl_offset_eq_16.value;
+
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_32_present = rule_req->filter_spec_ex_list[i].
+ filter_rule.ihl_offset_eq_32_present;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_32.offset = rule_req->filter_spec_ex_list[i].
+ filter_rule.ihl_offset_eq_32.offset;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_eq_32.value = rule_req->filter_spec_ex_list[i].
+ filter_rule.ihl_offset_eq_32.value;
+
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_ihl_offset_meq_32 = rule_req->filter_spec_ex_list[i].
+ filter_rule.num_ihl_offset_meq_32;
+ for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].
+ eq_attrib.num_ihl_offset_meq_32; j++) {
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_meq_32[j].offset = rule_req->
+ filter_spec_ex_list[i].filter_rule.
+ ihl_offset_meq_32[j].offset;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_meq_32[j].mask = rule_req->
+ filter_spec_ex_list[i].filter_rule.
+ ihl_offset_meq_32[j].mask;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ihl_offset_meq_32[j].value = rule_req->
+ filter_spec_ex_list[i].filter_rule.
+ ihl_offset_meq_32[j].value;
+ }
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_offset_meq_128 =
+ rule_req->filter_spec_ex_list[i].filter_rule.
+ num_offset_meq_128;
+ for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ num_offset_meq_128; j++) {
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_128[j].offset = rule_req->
+ filter_spec_ex_list[i].filter_rule.
+ offset_meq_128[j].offset;
+ memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_128[j].mask,
+ rule_req->filter_spec_ex_list[i].
+ filter_rule.offset_meq_128[j].mask, 16);
+ memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ offset_meq_128[j].value, rule_req->
+ filter_spec_ex_list[i].filter_rule.
+ offset_meq_128[j].value, 16);
+ }
+
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ metadata_meq32_present =
+ rule_req->filter_spec_ex_list[i].
+ filter_rule.metadata_meq32_present;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ metadata_meq32.offset =
+ rule_req->filter_spec_ex_list[i].
+ filter_rule.metadata_meq32.offset;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ metadata_meq32.mask = rule_req->filter_spec_ex_list[i].
+ filter_rule.metadata_meq32.mask;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
+ value = rule_req->filter_spec_ex_list[i].filter_rule.
+ metadata_meq32.value;
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+ ipv4_frag_eq_present = rule_req->filter_spec_ex_list[i].
+ filter_rule.ipv4_frag_eq_present;
+ }
+
+ if (rule_req->xlat_filter_indices_list_valid) {
+ if (rule_req->xlat_filter_indices_list_len >
+ rmnet_ipa3_ctx->num_q6_rules) {
+ IPAWANERR("Number of xlat indices is not valid: %d\n",
+ rule_req->xlat_filter_indices_list_len);
+ goto failure;
+ }
+ IPAWANDBG("Receive %d XLAT indices: ",
+ rule_req->xlat_filter_indices_list_len);
+ for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
+ IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
+ IPAWANDBG("\n");
+
+ for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
+ if (rule_req->xlat_filter_indices_list[i]
+ >= rmnet_ipa3_ctx->num_q6_rules) {
+ IPAWANERR("Xlat rule idx is wrong: %d\n",
+ rule_req->xlat_filter_indices_list[i]);
+ goto failure;
+ } else {
+ ipa3_qmi_ctx->q6_ul_filter_rule
+ [rule_req->xlat_filter_indices_list[i]]
+ .is_xlat_rule = 1;
+ IPAWANDBG("Rule %d is xlat rule\n",
+ rule_req->xlat_filter_indices_list[i]);
+ }
+ }
+ }
+ goto success;
+
+failure:
+ rmnet_ipa3_ctx->num_q6_rules = 0;
+ memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0,
+ sizeof(ipa3_qmi_ctx->q6_ul_filter_rule));
+ return -EINVAL;
+
+success:
+ return 0;
+}
+
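+/*
+ * Install the cached Q6 UL filter rules on the APPS LAN/WAN producer
+ * pipe, one commit per rule, then report the installed rule IDs back
+ * to Q6 via a QMI filter-install notification.
+ */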
+static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
+{
+ u32 pyld_sz;
+ int i, retval = 0;
+ struct ipa_ioc_add_flt_rule *param;
+ struct ipa_flt_rule_add flt_rule_entry;
+ struct ipa_fltr_installed_notif_req_msg_v01 *req;
+
+ pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
+ sizeof(struct ipa_flt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param)
+ return -ENOMEM;
+
+ req = kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+ GFP_KERNEL);
+ if (!req) {
+ kfree(param);
+ return -ENOMEM;
+ }
+
+ param->commit = 1;
+ param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ param->global = false;
+ param->num_rules = (uint8_t)1;
+
+ for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+ param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
+ memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
+ flt_rule_entry.at_rear = true;
+ flt_rule_entry.rule.action =
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].action;
+ flt_rule_entry.rule.rt_tbl_idx
+ = ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
+ flt_rule_entry.rule.retain_hdr = true;
+ flt_rule_entry.rule.hashable =
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable;
+ flt_rule_entry.rule.rule_id =
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
+
+ /* debug rt-hdl*/
+ IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
+ i, flt_rule_entry.rule.rt_tbl_idx);
+ flt_rule_entry.rule.eq_attrib_type = true;
+ memcpy(&(flt_rule_entry.rule.eq_attrib),
+ &ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
+ sizeof(struct ipa_ipfltri_rule_eq));
+ memcpy(&(param->rules[0]), &flt_rule_entry,
+ sizeof(struct ipa_flt_rule_add));
+ if (ipa3_add_flt_rule(param)) {
+ retval = -EFAULT;
+ IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
+ } else {
+ /* store the rule handler */
+ ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i] =
+ param->rules[0].flt_rule_hdl;
+ }
+ }
+
+ /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
+ req->source_pipe_index =
+ ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+ req->install_status = QMI_RESULT_SUCCESS_V01;
+ req->rule_id_valid = 1;
+ req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
+ for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+ req->rule_id[i] =
+ ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
+ }
+ if (ipa3_qmi_filter_notify_send(req)) {
+ IPAWANDBG("add filter rule index on A7-RX failed\n");
+ retval = -EFAULT;
+ }
+ rmnet_ipa3_ctx->old_num_q6_rules = rmnet_ipa3_ctx->num_q6_rules;
+ IPAWANDBG("add (%d) filter rule index on A7-RX\n",
+ rmnet_ipa3_ctx->old_num_q6_rules);
+ kfree(param);
+ kfree(req);
+ return retval;
+}
+
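+/*
+ * Remove the previously installed A7 UL filter rules using the rule
+ * handles stored when the rules were added.
+ */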
+static int ipa3_wwan_del_ul_flt_rule_to_ipa(void)
+{
+ u32 pyld_sz;
+ int i, retval = 0;
+ struct ipa_ioc_del_flt_rule *param;
+ struct ipa_flt_rule_del flt_rule_entry;
+
+ pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
+ sizeof(struct ipa_flt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ IPAWANERR("kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ param->commit = 1;
+ param->num_hdls = (uint8_t) 1;
+
+ for (i = 0; i < rmnet_ipa3_ctx->old_num_q6_rules; i++) {
+ param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
+ memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
+ flt_rule_entry.hdl = ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i];
+ /* debug rt-hdl*/
+ IPAWANDBG("delete-IPA rule index(%d)\n", i);
+ memcpy(&(param->hdl[0]), &flt_rule_entry,
+ sizeof(struct ipa_flt_rule_del));
+ if (ipa3_del_flt_rule(param)) {
+ IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
+ kfree(param);
+ return -EFAULT;
+ }
+ }
+
+ /* set UL filter-rule add-indication */
+ rmnet_ipa3_ctx->a7_ul_flt_set = false;
+ rmnet_ipa3_ctx->old_num_q6_rules = 0;
+
+ kfree(param);
+ return retval;
+}
+
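+/* Return the cache index of the mux channel matching mux_id, or
+ * MAX_NUM_OF_MUX_CHANNEL when no entry matches.
+ */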
+static int ipa3_find_mux_channel_index(uint32_t mux_id)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+ if (mux_id == rmnet_ipa3_ctx->mux_channel[i].mux_id)
+ return i;
+ }
+ return MAX_NUM_OF_MUX_CHANNEL;
+}
+
+static int find_vchannel_name_index(const char *vchannel_name)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+ if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+ vchannel_name) == 0)
+ return i;
+ }
+ return MAX_NUM_OF_MUX_CHANNEL;
+}
+
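+/*
+ * Register a mux channel with IPA: add a QMAP header for the mux ID
+ * (once), then register the Tx/Rx properties and the cached Q6 UL
+ * filter rules as extended properties of the interface.
+ */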
+static int ipa3_wwan_register_to_ipa(int index)
+{
+ struct ipa_tx_intf tx_properties = {0};
+ struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
+ struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
+ struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
+ struct ipa_rx_intf rx_properties = {0};
+ struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+ struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+ struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+ struct ipa_ext_intf ext_properties = {0};
+ struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
+ u32 pyld_sz;
+ int ret = 0, i;
+
+ IPAWANDBG("index(%d) device[%s]:\n", index,
+ rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+ if (!rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set) {
+ ret = ipa3_add_qmap_hdr(
+ rmnet_ipa3_ctx->mux_channel[index].mux_id,
+ &rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
+ if (ret) {
+ IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
+ return ret;
+ }
+ rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set = true;
+ }
+ tx_properties.prop = tx_ioc_properties;
+ tx_ipv4_property = &tx_properties.prop[0];
+ tx_ipv4_property->ip = IPA_IP_v4;
+ tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+ snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF,
+ rmnet_ipa3_ctx->mux_channel[index].mux_id);
+ tx_ipv6_property = &tx_properties.prop[1];
+ tx_ipv6_property->ip = IPA_IP_v6;
+ tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+ /* no need to use A2_MUX_HDR_NAME_V6_PREF, same header */
+ snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF,
+ rmnet_ipa3_ctx->mux_channel[index].mux_id);
+ tx_properties.num_props = 2;
+
+ rx_properties.prop = rx_ioc_properties;
+ rx_ipv4_property = &rx_properties.prop[0];
+ rx_ipv4_property->ip = IPA_IP_v4;
+ rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_ipv4_property->attrib.meta_data =
+ rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+ rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+ rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ rx_ipv6_property = &rx_properties.prop[1];
+ rx_ipv6_property->ip = IPA_IP_v6;
+ rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_ipv6_property->attrib.meta_data =
+ rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+ rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+ rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ rx_properties.num_props = 2;
+
+ pyld_sz = rmnet_ipa3_ctx->num_q6_rules *
+ sizeof(struct ipa_ioc_ext_intf_prop);
+ ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
+ if (!ext_ioc_properties) {
+ IPAWANERR("Error allocate memory\n");
+ return -ENOMEM;
+ }
+
+ ext_properties.prop = ext_ioc_properties;
+ ext_properties.excp_pipe_valid = true;
+ ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
+ ext_properties.num_props = rmnet_ipa3_ctx->num_q6_rules;
+ for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+ memcpy(&(ext_properties.prop[i]),
+ &(ipa3_qmi_ctx->q6_ul_filter_rule[i]),
+ sizeof(struct ipa_ioc_ext_intf_prop));
+ ext_properties.prop[i].mux_id =
+ rmnet_ipa3_ctx->mux_channel[index].mux_id;
+ IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
+ ext_properties.prop[i].ip,
+ ext_properties.prop[i].rt_tbl_idx);
+ IPAWANDBG("action: %d mux:%d\n",
+ ext_properties.prop[i].action,
+ ext_properties.prop[i].mux_id);
+ }
+ ret = ipa3_register_intf_ext(rmnet_ipa3_ctx->mux_channel[index].
+ vchannel_name, &tx_properties,
+ &rx_properties, &ext_properties);
+ if (ret) {
+ IPAWANERR("[%s]:ipa3_register_intf failed %d\n",
+ rmnet_ipa3_ctx->mux_channel[index].vchannel_name, ret);
+ goto fail;
+ }
+ rmnet_ipa3_ctx->mux_channel[index].ul_flt_reg = true;
+fail:
+ kfree(ext_ioc_properties);
+ return ret;
+}
+
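+/* De-register every interface whose UL filter rules were registered */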
+static void ipa3_cleanup_deregister_intf(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
+ if (rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg) {
+ ret = ipa3_deregister_intf(
+ rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
+ if (ret < 0) {
+ IPAWANERR("de-register device %s(%d) failed\n",
+ rmnet_ipa3_ctx->mux_channel[i].
+ vchannel_name,
+ i);
+ return;
+ }
+ IPAWANDBG("de-register device %s(%d) success\n",
+ rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+ i);
+ }
+ rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = false;
+ }
+}
+
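+/*
+ * Re-sync IPA after new UL filter rules arrive: replace the A7 UL
+ * filter rules (unless the modem configures embedded pipe filtering)
+ * and re-register the Tx/Rx/Ext properties of every cached mux channel.
+ */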
+int ipa3_wwan_update_mux_channel_prop(void)
+{
+ int ret = 0, i;
+ /* install UL filter rules */
+ if (rmnet_ipa3_ctx->egress_set) {
+ if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
+ IPAWANDBG("setup UL filter rules\n");
+ if (rmnet_ipa3_ctx->a7_ul_flt_set) {
+ IPAWANDBG("del previous UL filter rules\n");
+ /* delete rule hdlers */
+ ret = ipa3_wwan_del_ul_flt_rule_to_ipa();
+ if (ret) {
+ IPAWANERR("failed to del old rules\n");
+ return -EINVAL;
+ }
+ IPAWANDBG("deleted old UL rules\n");
+ }
+ ret = ipa3_wwan_add_ul_flt_rule_to_ipa();
+ }
+ if (ret)
+ IPAWANERR("failed to install UL rules\n");
+ else
+ rmnet_ipa3_ctx->a7_ul_flt_set = true;
+ }
+ /* update Tx/Rx/Ext property */
+ IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
+ if (rmnet_ipa3_ctx->rmnet_index == 0) {
+ IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
+ return ret;
+ }
+
+ ipa3_cleanup_deregister_intf();
+
+ for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
+ ret = ipa3_wwan_register_to_ipa(i);
+ if (ret < 0) {
+ IPAWANERR("failed to re-regist %s, mux %d, index %d\n",
+ rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+ rmnet_ipa3_ctx->mux_channel[i].mux_id,
+ i);
+ return -ENODEV;
+ }
+ IPAWANERR("dev(%s) has registered to IPA\n",
+ rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
+ rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = true;
+ }
+ return ret;
+}
+
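+/* Compatibility shim: older kernels provide INIT_COMPLETION() instead
+ * of reinit_completion(); map the latter onto the former when needed.
+ */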
+#ifdef INIT_COMPLETION
+#define reinit_completion(x) INIT_COMPLETION(*(x))
+#endif /* INIT_COMPLETION */
+
+static int __ipa_wwan_open(struct net_device *dev)
+{
+ struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
+ IPAWANDBG("[%s] __wwan_open()\n", dev->name);
+ if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
+ reinit_completion(&wwan_ptr->resource_granted_completion);
+ wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ napi_enable(&(wwan_ptr->napi));
+ return 0;
+}
+
+/**
+ * ipa3_wwan_open() - Opens the wwan network interface. Opens logical
+ * channel on A2 MUX driver and starts the network stack queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while opening logical channel on A2 MUX driver
+ */
+static int ipa3_wwan_open(struct net_device *dev)
+{
+ int rc = 0;
+
+ IPAWANDBG("[%s] wwan_open()\n", dev->name);
+ rc = __ipa_wwan_open(dev);
+ if (rc == 0)
+ netif_start_queue(dev);
+ return rc;
+}
+
+static int __ipa_wwan_close(struct net_device *dev)
+{
+ struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+ int rc = 0;
+
+ if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
+ wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
+ /* do not close wwan port once up, this causes
+ * remote side to hang if tried to open again
+ */
+ reinit_completion(&wwan_ptr->resource_granted_completion);
+ rc = ipa3_deregister_intf(dev->name);
+ if (rc) {
+ IPAWANERR("[%s]: ipa3_deregister_intf failed %d\n",
+ dev->name, rc);
+ return rc;
+ }
+ return rc;
+ } else {
+ return -EBADF;
+ }
+}
+
+/**
+ * ipa3_wwan_stop() - Stops the wwan network interface. Closes
+ * logical channel on A2 MUX driver and stops the network stack
+ * queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while closing logical channel on A2 MUX driver
+ */
+static int ipa3_wwan_stop(struct net_device *dev)
+{
+ IPAWANDBG("[%s] ipa3_wwan_stop()\n", dev->name);
+ __ipa_wwan_close(dev);
+ netif_stop_queue(dev);
+ return 0;
+}
+
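+/* Reject MTU changes outside the QMAP payload limit (WWAN_DATA_LEN) */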
+static int ipa3_wwan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 0 || new_mtu > WWAN_DATA_LEN)
+ return -EINVAL;
+ IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
+ dev->name, dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/**
+ * ipa3_wwan_xmit() - Transmits an skb.
+ *
+ * @skb: skb to be transmitted
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
+ * later
+ * -EFAULT: Error while transmitting the skb
+ */
+static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int ret = 0;
+ bool qmap_check;
+ struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+ struct ipa_tx_meta meta;
+
+ if (skb->protocol != htons(ETH_P_MAP)) {
+ IPAWANDBG_LOW
+ ("SW filtering out non-QMAP packet received from %s\n",
+ current->comm);
+ /* returning NETDEV_TX_OK means the skb was consumed */
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ qmap_check = RMNET_MAP_GET_CD_BIT(skb);
+ if (netif_queue_stopped(dev)) {
+ if (qmap_check &&
+ atomic_read(&wwan_ptr->outstanding_pkts) <
+ wwan_ptr->outstanding_high_ctl) {
+ pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
+ goto send;
+ } else {
+ pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ /* checking High WM hit */
+ if (atomic_read(&wwan_ptr->outstanding_pkts) >=
+ wwan_ptr->outstanding_high) {
+ if (!qmap_check) {
+ IPAWANDBG_LOW("pending(%d)/(%d)- stop(%d)\n",
+ atomic_read(&wwan_ptr->outstanding_pkts),
+ wwan_ptr->outstanding_high,
+ netif_queue_stopped(dev));
+ IPAWANDBG_LOW("qmap_chk(%d)\n", qmap_check);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+send:
+ /* IPA_RM checking start */
+ ret = ipa_rm_inactivity_timer_request_resource(
+ IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret == -EINPROGRESS) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+ if (ret) {
+ pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
+ dev->name, ret);
+ return -EFAULT;
+ }
+ /* IPA_RM checking end */
+
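+ /* QMAP command packets (CD bit set) are sent to the Q6 LAN consumer
+ * with a remote pkt-init destination; data packets take the normal
+ * APPS_LAN_WAN producer path.
+ */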
+ if (RMNET_MAP_GET_CD_BIT(skb)) {
+ memset(&meta, 0, sizeof(meta));
+ meta.pkt_init_dst_ep_valid = true;
+ meta.pkt_init_dst_ep_remote = true;
+ ret = ipa3_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
+ } else {
+ ret = ipa3_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
+ }
+
+ if (ret) {
+ ret = NETDEV_TX_BUSY;
+ dev->stats.tx_dropped++;
+ goto out;
+ }
+
+ atomic_inc(&wwan_ptr->outstanding_pkts);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ ret = NETDEV_TX_OK;
+out:
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_WWAN_0_PROD);
+ return ret;
+}
+
+static void ipa3_wwan_tx_timeout(struct net_device *dev)
+{
+ IPAWANERR("[%s] ipa3_wwan_tx_timeout(), data stall in UL\n", dev->name);
+}
+
+/**
+ * apps_ipa_tx_complete_notify() - Tx complete notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Checks that the packet is the one we sent and releases it.
+ * This function will be called in deferred context in the IPA wq.
+ */
+static void apps_ipa_tx_complete_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+ struct net_device *dev = (struct net_device *)priv;
+ struct ipa3_wwan_private *wwan_ptr;
+
+ if (dev != IPA_NETDEV()) {
+ IPAWANDBG("Received pre-SSR packet completion\n");
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (evt != IPA_WRITE_DONE) {
+ IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return;
+ }
+
+ wwan_ptr = netdev_priv(dev);
+ atomic_dec(&wwan_ptr->outstanding_pkts);
+ __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) &&
+ netif_queue_stopped(wwan_ptr->net) &&
+ atomic_read(&wwan_ptr->outstanding_pkts) <
+ (wwan_ptr->outstanding_low)) {
+ IPAWANDBG_LOW("Outstanding low (%d) - waking up queue\n",
+ wwan_ptr->outstanding_low);
+ netif_wake_queue(wwan_ptr->net);
+ }
+ __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
+ dev_kfree_skb_any(skb);
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_WWAN_0_PROD);
+}
+
+/**
+ * apps_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA will pass a packet to the Linux network stack with skb->data
+ */
+static void apps_ipa_packet_receive_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)priv;
+
+ if (evt == IPA_RECEIVE) {
+ struct sk_buff *skb = (struct sk_buff *)data;
+ int result;
+ unsigned int packet_len = skb->len;
+
+ IPAWANDBG_LOW("Rx packet was received");
+ skb->dev = IPA_NETDEV();
+ skb->protocol = htons(ETH_P_MAP);
+
+ if (ipa3_rmnet_res.ipa_napi_enable) {
+ trace_rmnet_ipa_netif_rcv_skb3(dev->stats.rx_packets);
+ result = netif_receive_skb(skb);
+ } else {
+ if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
+ == 0) {
+ trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
+ result = netif_rx_ni(skb);
+ } else {
+ trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
+ result = netif_rx(skb);
+ }
+ }
+
+ if (result) {
+ pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
+ __func__, __LINE__);
+ dev->stats.rx_dropped++;
+ }
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_len;
+ } else if (evt == IPA_CLIENT_START_POLL)
+ ipa3_rmnet_rx_cb(priv);
+ else if (evt == IPA_CLIENT_COMP_NAPI) {
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ napi_complete(&(rmnet_ipa3_ctx->wwan_priv->napi));
+ } else
+ IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
+}
+
+/**
+ * ipa3_wwan_ioctl() - I/O control for wwan network driver.
+ *
+ * @dev: network device
+ * @ifr: ignored
+ * @cmd: cmd to be executed. can be one of the following:
+ * IPA_WWAN_IOCTL_OPEN - Open the network interface
+ * IPA_WWAN_IOCTL_CLOSE - Close the network interface
+ *
+ * Return codes:
+ * 0: success
+ * -EFAULT: Error while copying ioctl data to/from user space
+ * -EINVAL: Unsupported ioctl command
+ */
+static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int rc = 0;
+ int mru = 1000, epid = 1, mux_index, len;
+ struct ipa_msg_meta msg_meta;
+ struct ipa_wan_msg *wan_msg = NULL;
+ struct rmnet_ioctl_extended_s extend_ioctl_data;
+ struct rmnet_ioctl_data_s ioctl_data;
+ struct ipa3_rmnet_mux_val *mux_channel;
+ int rmnet_index;
+
+ IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
+ switch (cmd) {
+ /* Set Ethernet protocol */
+ case RMNET_IOCTL_SET_LLP_ETHERNET:
+ break;
+ /* Set RAWIP protocol */
+ case RMNET_IOCTL_SET_LLP_IP:
+ break;
+ /* Get link protocol */
+ case RMNET_IOCTL_GET_LLP:
+ ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+ /* Set QoS header enabled */
+ case RMNET_IOCTL_SET_QOS_ENABLE:
+ return -EINVAL;
+ /* Set QoS header disabled */
+ case RMNET_IOCTL_SET_QOS_DISABLE:
+ break;
+ /* Get QoS header state */
+ case RMNET_IOCTL_GET_QOS:
+ ioctl_data.u.operation_mode = RMNET_MODE_NONE;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+ /* Get operation mode */
+ case RMNET_IOCTL_GET_OPMODE:
+ ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+ /* Open transport port */
+ case RMNET_IOCTL_OPEN:
+ break;
+ /* Close transport port */
+ case RMNET_IOCTL_CLOSE:
+ break;
+ /* Flow enable */
+ case RMNET_IOCTL_FLOW_ENABLE:
+ IPAWANDBG("Received flow enable\n");
+ if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_data_s))) {
+ rc = -EFAULT;
+ break;
+ }
+ ipa3_flow_control(IPA_CLIENT_USB_PROD, true,
+ ioctl_data.u.tcm_handle);
+ break;
+ /* Flow disable */
+ case RMNET_IOCTL_FLOW_DISABLE:
+ IPAWANDBG("Received flow disable\n");
+ if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_data_s))) {
+ rc = -EFAULT;
+ break;
+ }
+ ipa3_flow_control(IPA_CLIENT_USB_PROD, false,
+ ioctl_data.u.tcm_handle);
+ break;
+ /* Set flow handle */
+ case RMNET_IOCTL_FLOW_SET_HNDL:
+ break;
+
+ /* Extended IOCTLs */
+ case RMNET_IOCTL_EXTENDED:
+ IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
+ if (copy_from_user(&extend_ioctl_data,
+ (u8 *)ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_extended_s))) {
+ IPAWANERR("failed to copy extended ioctl data\n");
+ rc = -EFAULT;
+ break;
+ }
+ switch (extend_ioctl_data.extended_ioctl) {
+ /* Get features */
+ case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+ IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
+ extend_ioctl_data.u.data =
+ (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
+ RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
+ RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
+ /* Set MRU */
+ case RMNET_IOCTL_SET_MRU:
+ mru = extend_ioctl_data.u.data;
+ IPAWANDBG("get MRU size %d\n",
+ extend_ioctl_data.u.data);
+ break;
+ /* Get MRU */
+ case RMNET_IOCTL_GET_MRU:
+ extend_ioctl_data.u.data = mru;
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
+ /* GET SG support */
+ case RMNET_IOCTL_GET_SG_SUPPORT:
+ extend_ioctl_data.u.data =
+ ipa3_rmnet_res.ipa_advertise_sg_support;
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
+ /* Get endpoint ID */
+ case RMNET_IOCTL_GET_EPID:
+ IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
+ extend_ioctl_data.u.data = epid;
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ if (copy_from_user(&extend_ioctl_data,
+ (u8 *)ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_extended_s))) {
+ IPAWANERR("copy extended ioctl data failed\n");
+ rc = -EFAULT;
+ break;
+ }
+ IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
+ extend_ioctl_data.u.data);
+ break;
+ /* Endpoint pair */
+ case RMNET_IOCTL_GET_EP_PAIR:
+ IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
+ extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
+ ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+ extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
+ ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ if (copy_from_user(&extend_ioctl_data,
+ (u8 *)ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_extended_s))) {
+ IPAWANERR("copy extended ioctl data failed\n");
+ rc = -EFAULT;
+ break;
+ }
+ IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
+ extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
+ extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
+ break;
+ /* Get driver name */
+ case RMNET_IOCTL_GET_DRIVER_NAME:
+ memcpy(&extend_ioctl_data.u.if_name,
+ IPA_NETDEV()->name,
+ sizeof(IFNAMSIZ));
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
+ /* Add MUX ID */
+ case RMNET_IOCTL_ADD_MUX_CHANNEL:
+ mux_index = ipa3_find_mux_channel_index(
+ extend_ioctl_data.u.rmnet_mux_val.mux_id);
+ if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANDBG("already setup mux(%d)\n",
+ extend_ioctl_data.u.
+ rmnet_mux_val.mux_id);
+ return rc;
+ }
+ if (rmnet_ipa3_ctx->rmnet_index
+ >= MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("Exceed mux_channel limit(%d)\n",
+ rmnet_ipa3_ctx->rmnet_index);
+ return -EFAULT;
+ }
+ IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
+ extend_ioctl_data.u.rmnet_mux_val.mux_id,
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
+ /* cache the mux name and id */
+ mux_channel = rmnet_ipa3_ctx->mux_channel;
+ rmnet_index = rmnet_ipa3_ctx->rmnet_index;
+
+ mux_channel[rmnet_index].mux_id =
+ extend_ioctl_data.u.rmnet_mux_val.mux_id;
+ memcpy(mux_channel[rmnet_index].vchannel_name,
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
+ sizeof(mux_channel[rmnet_index]
+ .vchannel_name));
+ IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
+ mux_channel[rmnet_index].vchannel_name,
+ mux_channel[rmnet_index].mux_id,
+ rmnet_index);
+ /* check if UL filter rules have already arrived */
+ if (rmnet_ipa3_ctx->num_q6_rules != 0) {
+ IPAWANERR("registering dev(%s) to IPA\n",
+ extend_ioctl_data.u.rmnet_mux_val.
+ vchannel_name);
+ rc = ipa3_wwan_register_to_ipa(
+ rmnet_ipa3_ctx->rmnet_index);
+ if (rc < 0) {
+ IPAWANERR("device %s reg IPA failed\n",
+ extend_ioctl_data.u.
+ rmnet_mux_val.vchannel_name);
+ return -ENODEV;
+ }
+ mux_channel[rmnet_index].mux_channel_set = true;
+ mux_channel[rmnet_index].ul_flt_reg = true;
+ } else {
+ IPAWANDBG("dev(%s) haven't registered to IPA\n",
+ extend_ioctl_data.u.
+ rmnet_mux_val.vchannel_name);
+ mux_channel[rmnet_index].mux_channel_set = true;
+ mux_channel[rmnet_index].ul_flt_reg = false;
+ }
+ rmnet_ipa3_ctx->rmnet_index++;
+ break;
+ case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
+ IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.hdr.hdr_len = 8;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.cfg.cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_UL;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.cfg.cs_metadata_hdr_offset
+ = 1;
+ } else {
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.hdr.hdr_len = 4;
+ }
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.aggr.aggr_en =
+ IPA_ENABLE_AGGR;
+ else
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.aggr.aggr_en =
+ IPA_BYPASS_AGGR;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_metadata_valid = 1;
+ /* modem wants the offset at 0 */
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_metadata = 0;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
+ dst = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
+ mode = IPA_BASIC;
+
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.client =
+ IPA_CLIENT_APPS_LAN_WAN_PROD;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.notify =
+ apps_ipa_tx_complete_notify;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.desc_fifo_sz =
+ IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.priv = dev;
+
+ rc = ipa3_setup_sys_pipe(
+ &rmnet_ipa3_ctx->apps_to_ipa_ep_cfg,
+ &rmnet_ipa3_ctx->apps_to_ipa3_hdl);
+ if (rc)
+ IPAWANERR("failed to config egress endpoint\n");
+
+ if (rmnet_ipa3_ctx->num_q6_rules != 0) {
+ /* already got Q6 UL filter rules*/
+ if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt
+ == false)
+ rc = ipa3_wwan_add_ul_flt_rule_to_ipa();
+ else
+ rc = 0;
+ rmnet_ipa3_ctx->egress_set = true;
+ if (rc)
+ IPAWANERR("install UL rules failed\n");
+ else
+ rmnet_ipa3_ctx->a7_ul_flt_set = true;
+ } else {
+ /* wait for Q6 UL filter rules */
+ rmnet_ipa3_ctx->egress_set = true;
+ IPAWANDBG("no UL-rules, egress_set(%d)\n",
+ rmnet_ipa3_ctx->egress_set);
+ }
+ break;
+ case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
+ IPAWANDBG("get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ ipa_ep_cfg.cfg.cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_DL;
+
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
+ IPAWANERR("get AGG size %d count %d\n",
+ extend_ioctl_data.u.
+ ingress_format.agg_size,
+ extend_ioctl_data.u.
+ ingress_format.agg_count);
+ if (!ipa_disable_apps_wan_cons_deaggr(
+ extend_ioctl_data.u.
+ ingress_format.agg_size,
+ extend_ioctl_data.
+ u.ingress_format.agg_count)) {
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ ipa_ep_cfg.aggr.aggr_byte_limit =
+ extend_ioctl_data.u.ingress_format.
+ agg_size;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ ipa_ep_cfg.aggr.aggr_pkt_limit =
+ extend_ioctl_data.u.ingress_format.
+ agg_count;
+ }
+ }
+
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_len = 4;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_metadata_valid = 1;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
+ hdr.hdr_ofst_metadata = 1;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_pkt_size_valid = 1;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_pkt_size = 2;
+
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_total_len_or_pad_valid = true;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_total_len_or_pad = 0;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_payload_len_inc_padding = true;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_total_len_or_pad_offset = 0;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ hdr_little_endian = 0;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
+ metadata_mask.metadata_mask = 0xFF000000;
+
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.client =
+ IPA_CLIENT_APPS_WAN_CONS;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.notify =
+ apps_ipa_packet_receive_notify;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.priv = dev;
+
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled =
+ ipa3_rmnet_res.ipa_napi_enable;
+ if (rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled)
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+ else
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
+ mutex_lock(
+ &rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+ IPAWANDBG("In SSR sequence/recovery\n");
+ mutex_unlock(&rmnet_ipa3_ctx->
+ ipa_to_apps_pipe_handle_guard);
+ rc = -EFAULT;
+ break;
+ }
+ rc = ipa3_setup_sys_pipe(
+ &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
+ &rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+ mutex_unlock(&rmnet_ipa3_ctx->
+ ipa_to_apps_pipe_handle_guard);
+ if (rc)
+ IPAWANERR("failed to configure ingress\n");
+ break;
+ case RMNET_IOCTL_SET_XLAT_DEV_INFO:
+ wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
+ GFP_KERNEL);
+ if (!wan_msg) {
+ IPAWANERR("Failed to allocate memory.\n");
+ return -ENOMEM;
+ }
+ len = sizeof(wan_msg->upstream_ifname) >
+ sizeof(extend_ioctl_data.u.if_name) ?
+ sizeof(extend_ioctl_data.u.if_name) :
+ sizeof(wan_msg->upstream_ifname);
+ strlcpy(wan_msg->upstream_ifname,
+ extend_ioctl_data.u.if_name, len);
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = WAN_XLAT_CONNECT;
+ msg_meta.msg_len = sizeof(struct ipa_wan_msg);
+ rc = ipa3_send_msg(&msg_meta, wan_msg,
+ ipa3_wwan_msg_free_cb);
+ if (rc) {
+ IPAWANERR("Failed to send XLAT_CONNECT msg\n");
+ kfree(wan_msg);
+ }
+ break;
+ /* Get agg count */
+ case RMNET_IOCTL_GET_AGGREGATION_COUNT:
+ break;
+ /* Set agg count */
+ case RMNET_IOCTL_SET_AGGREGATION_COUNT:
+ break;
+ /* Get agg size */
+ case RMNET_IOCTL_GET_AGGREGATION_SIZE:
+ break;
+ /* Set agg size */
+ case RMNET_IOCTL_SET_AGGREGATION_SIZE:
+ break;
+ /* Do flow control */
+ case RMNET_IOCTL_FLOW_CONTROL:
+ break;
+ /* For legacy use */
+ case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
+ break;
+ /* Get HW/SW map */
+ case RMNET_IOCTL_GET_HWSW_MAP:
+ break;
+ /* Set RX Headroom */
+ case RMNET_IOCTL_SET_RX_HEADROOM:
+ break;
+ default:
+ IPAWANERR("[%s] unsupported extended cmd[%d]",
+ dev->name,
+ extend_ioctl_data.extended_ioctl);
+ rc = -EINVAL;
+ }
+ break;
+ default:
+ IPAWANERR("[%s] unsupported cmd[%d]",
+ dev->name, cmd);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static const struct net_device_ops ipa3_wwan_ops_ip = {
+ .ndo_open = ipa3_wwan_open,
+ .ndo_stop = ipa3_wwan_stop,
+ .ndo_start_xmit = ipa3_wwan_xmit,
+ .ndo_tx_timeout = ipa3_wwan_tx_timeout,
+ .ndo_do_ioctl = ipa3_wwan_ioctl,
+ .ndo_change_mtu = ipa3_wwan_change_mtu,
+ .ndo_set_mac_address = 0,
+ .ndo_validate_addr = 0,
+};
+
+/**
+ * ipa3_wwan_setup() - Sets up the wwan network device.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_wwan_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &ipa3_wwan_ops_ip;
+ ether_setup(dev);
+ /* set this after calling ether_setup */
+ dev->header_ops = NULL; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->mtu = WWAN_DATA_LEN;
+ dev->addr_len = 0;
+ dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ dev->needed_headroom = HEADROOM_FOR_QMAP;
+ dev->needed_tailroom = TAILROOM;
+ dev->watchdog_timeo = 1000;
+}
+
+/* IPA_RM related functions start*/
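+/* Q6_PROD request/release are deferred to the dedicated rm_q6_wq
+ * workqueue rather than performed inline in the RM callbacks.
+ */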
+static void ipa3_q6_prod_rm_request_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_request,
+ ipa3_q6_prod_rm_request_resource);
+static void ipa3_q6_prod_rm_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_release,
+ ipa3_q6_prod_rm_release_resource);
+
+static void ipa3_q6_prod_rm_request_resource(struct work_struct *work)
+{
+ int ret = 0;
+
+ ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
+ if (ret < 0 && ret != -EINPROGRESS) {
+ IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
+ ret);
+ return;
+ }
+}
+
+static int ipa3_q6_rm_request_resource(void)
+{
+ queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
+ &ipa3_q6_con_rm_request, 0);
+ return 0;
+}
+
+static void ipa3_q6_prod_rm_release_resource(struct work_struct *work)
+{
+ int ret = 0;
+
+ ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
+ if (ret < 0 && ret != -EINPROGRESS) {
+ IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
+ ret);
+ return;
+ }
+}
+
+
+static int ipa3_q6_rm_release_resource(void)
+{
+ queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
+ &ipa3_q6_con_rm_release, 0);
+ return 0;
+}
+
+
+static void ipa3_q6_rm_notify_cb(void *user_data,
+ enum ipa_rm_event event,
+ unsigned long data)
+{
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ IPAWANDBG_LOW("%s: Q6_PROD GRANTED CB\n", __func__);
+ break;
+ case IPA_RM_RESOURCE_RELEASED:
+ IPAWANDBG_LOW("%s: Q6_PROD RELEASED CB\n", __func__);
+ break;
+ default:
+ return;
+ }
+}
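+
+/*
+ * Create the Q6_PROD and Q6_CONS IPA_RM resources, add a Q6_PROD ->
+ * APPS_CONS dependency and set a default performance profile; errors
+ * unwind in reverse order of creation.
+ */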
+static int ipa3_q6_initialize_rm(void)
+{
+ struct ipa_rm_create_params create_params;
+ struct ipa_rm_perf_profile profile;
+ int result;
+
+ /* Initialize IPA_RM workqueue */
+ rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req");
+ if (!rmnet_ipa3_ctx->rm_q6_wq)
+ return -ENOMEM;
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_Q6_PROD;
+ create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb;
+ result = ipa_rm_create_resource(&create_params);
+ if (result)
+ goto create_rsrc_err1;
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_Q6_CONS;
+ create_params.release_resource = &ipa3_q6_rm_release_resource;
+ create_params.request_resource = &ipa3_q6_rm_request_resource;
+ result = ipa_rm_create_resource(&create_params);
+ if (result)
+ goto create_rsrc_err2;
+ /* add dependency*/
+ result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_APPS_CONS);
+ if (result)
+ goto add_dpnd_err;
+ /* setup Performance profile */
+ memset(&profile, 0, sizeof(profile));
+ profile.max_supported_bandwidth_mbps = 100;
+ result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+ &profile);
+ if (result)
+ goto set_perf_err;
+ result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
+ &profile);
+ if (result)
+ goto set_perf_err;
+ return result;
+
+set_perf_err:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_APPS_CONS);
+add_dpnd_err:
+ result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+ if (result < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_CONS, result);
+create_rsrc_err2:
+ result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+ if (result < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_PROD, result);
+create_rsrc_err1:
+ destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+ return result;
+}
+
+void ipa3_q6_deinitialize_rm(void)
+{
+ int ret;
+
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_APPS_CONS);
+ if (ret < 0)
+ IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
+ ret);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+ if (ret < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_CONS, ret);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+ if (ret < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_Q6_PROD, ret);
+ destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+}
+
+static void ipa3_wake_tx_queue(struct work_struct *work)
+{
+ if (IPA_NETDEV()) {
+ __netif_tx_lock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
+ netif_wake_queue(IPA_NETDEV());
+ __netif_tx_unlock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
+ }
+}
+
+/**
+ * ipa3_rm_resource_granted() - Called upon
+ * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_rm_resource_granted(void *dev)
+{
+ IPAWANDBG_LOW("Resource Granted - starting queue\n");
+ schedule_work(&ipa3_tx_wakequeue_work);
+}
+
+/**
+ * ipa3_rm_notify() - Callback function for RM events. Handles
+ * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
+ * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
+ * workqueue.
+ *
+ * @dev: network device
+ * @event: IPA RM event
+ * @data: Additional data provided by IPA RM
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_rm_notify(void *dev, enum ipa_rm_event event,
+ unsigned long data)
+{
+ struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
+ pr_debug("%s: event %d\n", __func__, event);
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
+ complete_all(&wwan_ptr->resource_granted_completion);
+ break;
+ }
+ ipa3_rm_resource_granted(dev);
+ break;
+ case IPA_RM_RESOURCE_RELEASED:
+ break;
+ default:
+ pr_err("%s: unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+/* IPA_RM related functions end*/
+
+static int ipa3_ssr_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data);
+
+static struct notifier_block ipa3_ssr_notifier = {
+ .notifier_call = ipa3_ssr_notifier_cb,
+};
+
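+/* Read the rmnet_ipa devicetree properties: SSR support, whether the
+ * AP loads the IPA uC, and scatter-gather advertisement.
+ */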
+static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
+ struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
+{
+ ipa_rmnet_drv_res->ipa_rmnet_ssr =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,rmnet-ipa-ssr");
+ pr_info("IPA SSR support = %s\n",
+ ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
+ ipa_rmnet_drv_res->ipa_loaduC =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-loaduC");
+ pr_info("IPA ipa-loaduC = %s\n",
+ ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
+
+ ipa_rmnet_drv_res->ipa_advertise_sg_support =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-advertise-sg-support");
+ pr_info("IPA SG support = %s\n",
+ ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
+ return 0;
+}
+
+struct ipa3_rmnet_context ipa3_rmnet_ctx;
+static int ipa3_wwan_probe(struct platform_device *pdev);
+struct platform_device *m_pdev;
+
+static void ipa3_delayed_probe(struct work_struct *work)
+{
+ (void)ipa3_wwan_probe(m_pdev);
+}
+
+static DECLARE_WORK(ipa3_scheduled_probe, ipa3_delayed_probe);
+
+static void ipa3_ready_cb(void *user_data)
+{
+ struct platform_device *pdev = (struct platform_device *)(user_data);
+
+ m_pdev = pdev;
+
+ IPAWANDBG("IPA ready callback has been triggered!\n");
+
+ schedule_work(&ipa3_scheduled_probe);
+}
+
+/**
+ * ipa3_wwan_probe() - Initializes the module and registers as a
+ * network interface to the network stack
+ *
+ * Note: In case IPA driver hasn't initialized already, the probe function
+ * will return immediately after registering a callback to be invoked when
+ * IPA driver initialization is complete.
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: No memory available
+ * -EFAULT: Internal error
+ */
+static int ipa3_wwan_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ struct net_device *dev;
+ struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */
+ struct ipa_rm_perf_profile profile; /* IPA_RM */
+
+ pr_info("rmnet_ipa3 started initialization\n");
+
+ if (!ipa3_is_ready()) {
+ IPAWANDBG("IPA driver not ready, registering callback\n");
+ ret = ipa_register_ipa_ready_cb(ipa3_ready_cb, (void *)pdev);
+
+ /*
+ * If we received -EEXIST, IPA has already initialized, so
+ * we continue the probing process.
+ */
+ if (ret != -EEXIST) {
+ if (ret)
+ IPAWANERR("IPA CB reg failed - %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = get_ipa_rmnet_dts_configuration(pdev, &ipa3_rmnet_res);
+ ipa3_rmnet_ctx.ipa_rmnet_ssr = ipa3_rmnet_res.ipa_rmnet_ssr;
+
+ ret = ipa3_init_q6_smem();
+ if (ret) {
+ IPAWANERR("ipa3_init_q6_smem failed!\n");
+ return ret;
+ }
+
+ /* initialize tx/rx endpoint setup */
+ memset(&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg, 0,
+ sizeof(struct ipa_sys_connect_params));
+ memset(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, 0,
+ sizeof(struct ipa_sys_connect_params));
+
+ /* initialize ex property setup */
+ rmnet_ipa3_ctx->num_q6_rules = 0;
+ rmnet_ipa3_ctx->old_num_q6_rules = 0;
+ rmnet_ipa3_ctx->rmnet_index = 0;
+ rmnet_ipa3_ctx->egress_set = false;
+ rmnet_ipa3_ctx->a7_ul_flt_set = false;
+ for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
+ memset(&rmnet_ipa3_ctx->mux_channel[i], 0,
+ sizeof(struct ipa3_rmnet_mux_val));
+
+ /* start A7 QMI service/client */
+ if (ipa3_rmnet_res.ipa_loaduC)
+ /* Android platform loads uC */
+ ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
+ else
+ /* LE platform does not load uC */
+ ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
+
+ /* construct default WAN RT tbl for IPACM */
+ ret = ipa3_setup_a7_qmap_hdr();
+ if (ret)
+ goto setup_a7_qmap_hdr_err;
+ ret = ipa3_setup_dflt_wan_rt_tables();
+ if (ret)
+ goto setup_dflt_wan_rt_tables_err;
+
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+ /* Start transport-driver fd ioctl for ipacm for first init */
+ ret = ipa3_wan_ioctl_init();
+ if (ret)
+ goto wan_ioctl_init_err;
+ } else {
+ /* Enable sending QMI messages after SSR */
+ ipa3_wan_ioctl_enable_qmi_messages();
+ }
+
+ /* initialize wan-driver netdev */
+ dev = alloc_netdev(sizeof(struct ipa3_wwan_private),
+ IPA_WWAN_DEV_NAME,
+ NET_NAME_UNKNOWN,
+ ipa3_wwan_setup);
+ if (!dev) {
+ IPAWANERR("no memory for netdev\n");
+ ret = -ENOMEM;
+ goto alloc_netdev_err;
+ }
+ rmnet_ipa3_ctx->wwan_priv = netdev_priv(dev);
+ memset(rmnet_ipa3_ctx->wwan_priv, 0,
+ sizeof(*(rmnet_ipa3_ctx->wwan_priv)));
+ IPAWANDBG("wwan_ptr (private) = %p", rmnet_ipa3_ctx->wwan_priv);
+ rmnet_ipa3_ctx->wwan_priv->net = dev;
+ rmnet_ipa3_ctx->wwan_priv->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+ rmnet_ipa3_ctx->wwan_priv->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+ atomic_set(&rmnet_ipa3_ctx->wwan_priv->outstanding_pkts, 0);
+ spin_lock_init(&rmnet_ipa3_ctx->wwan_priv->lock);
+ init_completion(
+ &rmnet_ipa3_ctx->wwan_priv->resource_granted_completion);
+
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+ /* IPA_RM configuration starts */
+ ret = ipa3_q6_initialize_rm();
+ if (ret) {
+ IPAWANERR("%s: ipa3_q6_initialize_rm failed, ret: %d\n",
+ __func__, ret);
+ goto q6_init_err;
+ }
+ }
+
+ memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
+ ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
+ ipa_rm_params.reg_params.user_data = dev;
+ ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
+ ret = ipa_rm_create_resource(&ipa_rm_params);
+ if (ret) {
+ pr_err("%s: unable to create resourse %d in IPA RM\n",
+ __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
+ goto create_rsrc_err;
+ }
+ ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_INACTIVITY_TIMER);
+ if (ret) {
+ pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
+ __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
+ goto timer_init_err;
+ }
+ /* add dependency */
+ ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (ret)
+ goto add_dpnd_err;
+ /* setup Performance profile */
+ memset(&profile, 0, sizeof(profile));
+ profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+ ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
+ &profile);
+ if (ret)
+ goto set_perf_err;
+ /* IPA_RM configuration ends */
+
+ /* Enable SG support in netdevice. */
+ if (ipa3_rmnet_res.ipa_advertise_sg_support)
+ dev->hw_features |= NETIF_F_SG;
+
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ netif_napi_add(dev, &(rmnet_ipa3_ctx->wwan_priv->napi),
+ ipa3_rmnet_poll, NAPI_WEIGHT);
+ ret = register_netdev(dev);
+ if (ret) {
+ IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
+ 0, ret);
+ goto set_perf_err;
+ }
+
+ IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n", dev->name);
+ if (ret) {
+ IPAWANERR("default configuration failed rc=%d\n",
+ ret);
+ goto config_err;
+ }
+ atomic_set(&rmnet_ipa3_ctx->is_initialized, 1);
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+ /* offline charging mode */
+ ipa3_proxy_clk_unvote();
+ }
+ atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+
+ pr_info("rmnet_ipa completed initialization\n");
+ return 0;
+config_err:
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
+ unregister_netdev(dev);
+set_perf_err:
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (ret)
+ IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+ ret);
+add_dpnd_err:
+ ret = ipa_rm_inactivity_timer_destroy(
+ IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
+ if (ret)
+ IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+timer_init_err:
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+create_rsrc_err:
+ ipa3_q6_deinitialize_rm();
+q6_init_err:
+ free_netdev(dev);
+ rmnet_ipa3_ctx->wwan_priv = NULL;
+alloc_netdev_err:
+ ipa3_wan_ioctl_deinit();
+wan_ioctl_init_err:
+ ipa3_del_dflt_wan_rt_tables();
+setup_dflt_wan_rt_tables_err:
+ ipa3_del_a7_qmap_hdr();
+setup_a7_qmap_hdr_err:
+ ipa3_qmi_service_exit();
+ atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+ return ret;
+}
+
+static int ipa3_wwan_remove(struct platform_device *pdev)
+{
+ int ret;
+
+ pr_info("rmnet_ipa started deinitialization\n");
+ mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+ if (ret < 0)
+ IPAWANERR("Failed to teardown IPA->APPS pipe\n");
+ else
+ rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
+ mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ unregister_netdev(IPA_NETDEV());
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (ret < 0)
+ IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+ ret);
+ ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret < 0)
+ IPAWANERR(
+ "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+ cancel_work_sync(&ipa3_tx_wakequeue_work);
+ cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+ if (IPA_NETDEV())
+ free_netdev(IPA_NETDEV());
+ rmnet_ipa3_ctx->wwan_priv = NULL;
+ /* No need to remove wwan_ioctl during SSR */
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
+ ipa3_wan_ioctl_deinit();
+ ipa3_del_dflt_wan_rt_tables();
+ ipa3_del_a7_qmap_hdr();
+ ipa3_del_mux_qmap_hdrs();
+ if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false)
+ ipa3_wwan_del_ul_flt_rule_to_ipa();
+ ipa3_cleanup_deregister_intf();
+ atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
+ pr_info("rmnet_ipa completed deinitialization\n");
+ return 0;
+}
+
+/**
+* rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
+* @dev: pointer to device
+*
+* This callback will be invoked by the runtime_pm framework when an AP suspend
+* operation is invoked, usually by pressing a suspend button.
+*
+* Returns -EAGAIN to the runtime_pm framework in case there are pending
+* packets in the Tx queue. This postpones the suspend operation until all
+* pending packets have been transmitted.
+*
+* In case there are no packets to send, releases the WWAN0_PROD entity.
+* As an outcome, the number of IPA active clients should be decremented
+* until IPA clocks can be gated.
+*/
+static int rmnet_ipa_ap_suspend(struct device *dev)
+{
+ struct net_device *netdev = IPA_NETDEV();
+ struct ipa3_wwan_private *wwan_ptr;
+
+ IPAWANDBG_LOW("Enter...\n");
+ if (netdev == NULL) {
+ IPAWANERR("netdev is NULL.\n");
+ return 0;
+ }
+
+ wwan_ptr = netdev_priv(netdev);
+ if (wwan_ptr == NULL) {
+ IPAWANERR("wwan_ptr is NULL.\n");
+ return 0;
+ }
+
+ /* Do not allow A7 to suspend in case there are outstanding packets */
+ if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
+ IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
+ return -EAGAIN;
+ }
+
+ /* Make sure that there is no Tx operation ongoing */
+ netif_tx_lock_bh(netdev);
+ ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ netif_tx_unlock_bh(netdev);
+ IPAWANDBG_LOW("Exit\n");
+
+ return 0;
+}
+
+/**
+* rmnet_ipa_ap_resume() - resume callback for runtime_pm
+* @dev: pointer to device
+*
+* This callback will be invoked by the runtime_pm framework when an AP resume
+* operation is invoked.
+*
+* Enables the network interface queue and returns success to the
+* runtime_pm framework.
+*/
+static int rmnet_ipa_ap_resume(struct device *dev)
+{
+ struct net_device *netdev = IPA_NETDEV();
+
+ IPAWANDBG_LOW("Enter...\n");
+ if (netdev)
+ netif_wake_queue(netdev);
+ IPAWANDBG_LOW("Exit\n");
+
+ return 0;
+}
+
+static void ipa_stop_polling_stats(void)
+{
+ cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+ ipa3_rmnet_ctx.polling_interval = 0;
+}
+
+static const struct of_device_id rmnet_ipa_dt_match[] = {
+ {.compatible = "qcom,rmnet-ipa3"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);
+
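+/* AP suspend/resume callbacks are hooked at the noirq phase, after
+ * normal device activity has been frozen.
+ */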
+static const struct dev_pm_ops rmnet_ipa_pm_ops = {
+ .suspend_noirq = rmnet_ipa_ap_suspend,
+ .resume_noirq = rmnet_ipa_ap_resume,
+};
+
+static struct platform_driver rmnet_ipa_driver = {
+ .driver = {
+ .name = "rmnet_ipa3",
+ .owner = THIS_MODULE,
+ .pm = &rmnet_ipa_pm_ops,
+ .of_match_table = rmnet_ipa_dt_match,
+ },
+ .probe = ipa3_wwan_probe,
+ .remove = ipa3_wwan_remove,
+};
+
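+/*
+ * SSR notifier: quiesce QMI and netdev activity before modem shutdown,
+ * clean up Q6 state after shutdown, vote for proxy clocks before
+ * powerup, and re-register the platform driver after powerup.
+ */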
+static int ipa3_ssr_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data)
+{
+ if (!ipa3_rmnet_ctx.ipa_rmnet_ssr)
+ return NOTIFY_DONE;
+
+ switch (code) {
+ case SUBSYS_BEFORE_SHUTDOWN:
+ IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n");
+ atomic_set(&rmnet_ipa3_ctx->is_ssr, 1);
+ ipa3_q6_pre_shutdown_cleanup();
+ if (IPA_NETDEV())
+ netif_stop_queue(IPA_NETDEV());
+ ipa3_qmi_stop_workqueues();
+ ipa3_wan_ioctl_stop_qmi_messages();
+ ipa_stop_polling_stats();
+ if (atomic_read(&rmnet_ipa3_ctx->is_initialized))
+ platform_driver_unregister(&rmnet_ipa_driver);
+ IPAWANINFO("IPA BEFORE_SHUTDOWN handling is complete\n");
+ break;
+ case SUBSYS_AFTER_SHUTDOWN:
+ IPAWANINFO("IPA Received MPSS AFTER_SHUTDOWN\n");
+ if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
+ ipa3_q6_post_shutdown_cleanup();
+ IPAWANINFO("IPA AFTER_SHUTDOWN handling is complete\n");
+ break;
+ case SUBSYS_BEFORE_POWERUP:
+ IPAWANINFO("IPA received MPSS BEFORE_POWERUP\n");
+ if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
+ /* clean up cached QMI msg/handlers */
+ ipa3_qmi_service_exit();
+ /*hold a proxy vote for the modem*/
+ ipa3_proxy_clk_vote();
+ IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n");
+ break;
+ case SUBSYS_AFTER_POWERUP:
+ IPAWANINFO("%s:%d IPA received MPSS AFTER_POWERUP\n",
+ __func__, __LINE__);
+ if (!atomic_read(&rmnet_ipa3_ctx->is_initialized) &&
+ atomic_read(&rmnet_ipa3_ctx->is_ssr))
+ platform_driver_register(&rmnet_ipa_driver);
+
+ IPAWANINFO("IPA AFTER_POWERUP handling is complete\n");
+ break;
+ default:
+ IPAWANDBG("Unsupported subsys notification, IPA received: %lu",
+ code);
+ break;
+ }
+
+ IPAWANDBG_LOW("Exit\n");
+ return NOTIFY_DONE;
+}
+
+/**
+ * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa_send_msg
+ * @buff: pointer to buffer containing the message
+ * @len: message len
+ * @type: message type
+ *
+ * This function is invoked when ipa_send_msg is complete (Provided as a
+ * free function pointer along with the message).
+ */
+static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
+{
+ if (!buff) {
+ IPAWANERR("Null buffer\n");
+ return;
+ }
+
+ if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
+ type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+ IPAWANERR("Wrong type given. buff %p type %d\n",
+ buff, type);
+ }
+ kfree(buff);
+}
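+
+/*
+ * Illustrative sketch (guarded out, not part of this patch): the buffer
+ * ownership contract around ipa_send_msg(). On success the buffer passed
+ * with the message is freed later by the supplied callback
+ * (rmnet_ipa_free_msg() above); on failure the callback never runs, so
+ * the caller must free the buffer itself.
+ */
+#if 0
+static int rmnet_ipa_send_stats_sketch(
+	struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+	struct ipa_msg_meta msg_meta;
+	int rc;
+
+	memset(&msg_meta, 0, sizeof(msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+	msg_meta.msg_len = sizeof(*resp);
+	rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc)
+		kfree(resp);	/* callback will not run; free here */
+	return rc;
+}
+#endif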
+
+/**
+ * rmnet_ipa_get_stats_and_update() - Gets pipe stats from Modem
+ *
+ * This function queries the IPA Modem driver for the pipe stats
+ * via QMI, and updates the user space IPA entity.
+ */
+static void rmnet_ipa_get_stats_and_update(void)
+{
+ struct ipa_get_data_stats_req_msg_v01 req;
+ struct ipa_get_data_stats_resp_msg_v01 *resp;
+ struct ipa_msg_meta msg_meta;
+ int rc;
+
+ resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ return;
+ }
+
+ memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+ memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+ req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+
+ rc = ipa3_qmi_get_data_stats(&req, resp);
+
+ if (!rc) {
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+ msg_meta.msg_len =
+ sizeof(struct ipa_get_data_stats_resp_msg_v01);
+ rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ kfree(resp);
+ return;
+ }
+	} else {
+		/* QMI query failed; the free callback will not run */
+		kfree(resp);
+	}
+}
+
+/**
+ * tethering_stats_poll_queue() - Stats polling function
+ * @work - Work entry
+ *
+ * This function is scheduled periodically (per the interval) in
+ * order to poll the IPA Modem driver for the pipe stats.
+ */
+static void tethering_stats_poll_queue(struct work_struct *work)
+{
+ rmnet_ipa_get_stats_and_update();
+
+ /* Schedule again only if there's an active polling interval */
+ if (ipa3_rmnet_ctx.polling_interval != 0)
+ schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
+ msecs_to_jiffies(ipa3_rmnet_ctx.polling_interval*1000));
+}
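+
+/*
+ * The delayed work item driving this poll loop is assumed to be declared
+ * earlier in this file (not visible in this hunk), along the lines of:
+ *
+ *	static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
+ *		tethering_stats_poll_queue);
+ */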
+
+/**
+ * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
+ *
+ * This function retrieves the data usage (used quota) from the IPA Modem driver
+ * via QMI, and updates IPA user space entity.
+ */
+static void rmnet_ipa_get_network_stats_and_update(void)
+{
+ struct ipa_get_apn_data_stats_req_msg_v01 req;
+ struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
+ struct ipa_msg_meta msg_meta;
+ int rc;
+
+ resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for network stats message\n");
+ return;
+ }
+
+ memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
+ memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
+
+ req.mux_id_list_valid = true;
+ req.mux_id_list_len = 1;
+ req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id;
+
+ rc = ipa3_qmi_get_network_stats(&req, resp);
+
+ if (!rc) {
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
+ msg_meta.msg_len =
+ sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
+ rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ kfree(resp);
+ return;
+ }
+	} else {
+		/* QMI query failed; the free callback will not run */
+		kfree(resp);
+	}
+}
+
+/**
+ * rmnet_ipa3_poll_tethering_stats() - Tethering stats polling IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_POLL_TETHERING_STATS.
+ * If the polling interval received is 0, polling stops (a poll already in
+ * progress is allowed to finish); data quota monitoring via QMI is then
+ * stopped and the network and pipe stats are fetched once more to update
+ * the IPA user space entity.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
+{
+ ipa3_rmnet_ctx.polling_interval = data->polling_interval_secs;
+
+ cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
+
+ if (ipa3_rmnet_ctx.polling_interval == 0) {
+ ipa3_qmi_stop_data_qouta();
+ rmnet_ipa_get_network_stats_and_update();
+ rmnet_ipa_get_stats_and_update();
+ return 0;
+ }
+
+ schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
+ return 0;
+}
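+
+/*
+ * Illustrative user space sketch (guarded out): driving the stats polling
+ * through the wwan_ioctl chardev. The /dev node name and the uapi header
+ * path are assumptions based on DRIVER_NAME in rmnet_ipa_fd_ioctl.c.
+ */
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <linux/rmnet_ipa_fd_ioctl.h>
+
+int main(void)
+{
+	struct wan_ioctl_poll_tethering_stats poll = {
+		.polling_interval_secs = 5,	/* poll every 5 seconds */
+	};
+	int fd = open("/dev/wwan_ioctl", O_RDWR);
+
+	if (fd < 0)
+		return 1;
+	if (ioctl(fd, WAN_IOC_POLL_TETHERING_STATS, &poll) < 0)
+		perror("WAN_IOC_POLL_TETHERING_STATS");
+	/* a zero interval later stops the polling */
+	poll.polling_interval_secs = 0;
+	ioctl(fd, WAN_IOC_POLL_TETHERING_STATS, &poll);
+	close(fd);
+	return 0;
+}
+#endif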
+
+/**
+ * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the request of the quota to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+ u32 mux_id;
+ int index;
+ struct ipa_set_data_usage_quota_req_msg_v01 req;
+
+ index = find_vchannel_name_index(data->interface_name);
+ IPAWANERR("iface name %s, quota %lu\n",
+ data->interface_name,
+ (unsigned long int) data->quota_mbytes);
+
+ if (index == MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("%s is an invalid iface name\n",
+ data->interface_name);
+ return -EFAULT;
+ }
+
+ mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id;
+ ipa3_rmnet_ctx.metered_mux_id = mux_id;
+
+ memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
+ req.apn_quota_list_valid = true;
+ req.apn_quota_list_len = 1;
+ req.apn_quota_list[0].mux_id = mux_id;
+ req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
+
+ return ipa3_qmi_set_data_quota(&req);
+}
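+
+/*
+ * Illustrative user space sketch (guarded out, same includes as the
+ * polling sketch above): setting a 100 MB quota on an rmnet interface.
+ * The interface name is hypothetical; the fields match those
+ * dereferenced above.
+ */
+#if 0
+static int set_quota_sketch(int fd)
+{
+	struct wan_ioctl_set_data_quota quota;
+
+	memset(&quota, 0, sizeof(quota));
+	/* "rmnet_data0" is a hypothetical vchannel name */
+	strncpy(quota.interface_name, "rmnet_data0",
+		sizeof(quota.interface_name) - 1);
+	quota.quota_mbytes = 100;
+	return ioctl(fd, WAN_IOC_SET_DATA_QUOTA, &quota);
+}
+#endif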
+
+/**
+ * rmnet_ipa3_set_tether_client_pipe() - Tether client pipe setting IOCTL
+ * handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
+ * It attaches the given UL source and DL destination pipe lists to the
+ * given tethered client, or detaches them when a client reset is
+ * requested.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa3_set_tether_client_pipe(
+ struct wan_ioctl_set_tether_client_pipe *data)
+{
+ int number, i;
+
+ IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
+ data->ipa_client,
+ data->ul_src_pipe_len,
+ data->dl_dst_pipe_len,
+ data->reset_client);
+ number = data->ul_src_pipe_len;
+ for (i = 0; i < number; i++) {
+ IPAWANDBG("UL index-%d pipe %d\n", i,
+ data->ul_src_pipe_list[i]);
+ if (data->reset_client)
+ ipa3_set_client(data->ul_src_pipe_list[i],
+ 0, false);
+ else
+ ipa3_set_client(data->ul_src_pipe_list[i],
+ data->ipa_client, true);
+ }
+ number = data->dl_dst_pipe_len;
+ for (i = 0; i < number; i++) {
+ IPAWANDBG("DL index-%d pipe %d\n", i,
+ data->dl_dst_pipe_list[i]);
+ if (data->reset_client)
+ ipa3_set_client(data->dl_dst_pipe_list[i],
+ 0, false);
+ else
+ ipa3_set_client(data->dl_dst_pipe_list[i],
+ data->ipa_client, false);
+ }
+ return 0;
+}
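+
+/*
+ * Illustrative user space sketch (guarded out, same includes as the
+ * polling sketch above): attaching one UL and one DL pipe to a tethered
+ * client. The pipe indices and client enum value are hypothetical; the
+ * fields match those read above.
+ */
+#if 0
+static int set_tether_pipes_sketch(int fd)
+{
+	struct wan_ioctl_set_tether_client_pipe tether = {
+		.ipa_client = IPA_CLIENT_USB_PROD,	/* hypothetical */
+		.ul_src_pipe_len = 1,
+		.ul_src_pipe_list = { 4 },	/* hypothetical pipe index */
+		.dl_dst_pipe_len = 1,
+		.dl_dst_pipe_list = { 5 },	/* hypothetical pipe index */
+		.reset_client = false,
+	};
+
+	return ioctl(fd, WAN_IOC_SET_TETHER_CLIENT_PIPE, &tether);
+}
+#endif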
+
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+ bool reset)
+{
+ struct ipa_get_data_stats_req_msg_v01 *req;
+ struct ipa_get_data_stats_resp_msg_v01 *resp;
+ int pipe_len, rc;
+
+ req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
+ GFP_KERNEL);
+ if (!req) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ return -ENOMEM;
+ }
+ resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ kfree(req);
+ return -ENOMEM;
+ }
+ memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+ memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+ req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+ if (reset) {
+ req->reset_stats_valid = true;
+ req->reset_stats = true;
+ IPAWANERR("reset the pipe stats\n");
+ } else {
+ /* print tethered-client enum */
+ IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client);
+ }
+
+ rc = ipa3_qmi_get_data_stats(req, resp);
+ if (rc) {
+ IPAWANERR("can't get ipa_qmi_get_data_stats\n");
+ kfree(req);
+ kfree(resp);
+ return rc;
+ } else if (reset) {
+ kfree(req);
+ kfree(resp);
+ return 0;
+ }
+
+ if (resp->dl_dst_pipe_stats_list_valid) {
+ for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
+ pipe_len++) {
+ IPAWANDBG_LOW("Check entry(%d) dl_dst_pipe(%d)\n",
+ pipe_len, resp->dl_dst_pipe_stats_list
+ [pipe_len].pipe_index);
+ IPAWANDBG_LOW("dl_p_v4(%lu)v6(%lu)\n",
+ (unsigned long int) resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv4_packets,
+ (unsigned long int) resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv6_packets);
+ IPAWANDBG_LOW("dl_b_v4(%lu)v6(%lu)\n",
+ (unsigned long int) resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv4_bytes,
+ (unsigned long int) resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv6_bytes);
+ if (ipa_get_client_uplink(resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ pipe_index) == false) {
+ if (data->ipa_client == ipa_get_client(resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ pipe_index)) {
+ /* update the DL stats */
+ data->ipv4_rx_packets += resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv4_packets;
+ data->ipv6_rx_packets += resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv6_packets;
+ data->ipv4_rx_bytes += resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv4_bytes;
+ data->ipv6_rx_bytes += resp->
+ dl_dst_pipe_stats_list[pipe_len].
+ num_ipv6_bytes;
+ }
+ }
+ }
+ }
+ IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+ (unsigned long int) data->ipv4_rx_packets,
+ (unsigned long int) data->ipv6_rx_packets,
+ (unsigned long int) data->ipv4_rx_bytes,
+ (unsigned long int) data->ipv6_rx_bytes);
+
+ if (resp->ul_src_pipe_stats_list_valid) {
+ for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
+ pipe_len++) {
+ IPAWANDBG_LOW("Check entry(%d) ul_dst_pipe(%d)\n",
+ pipe_len,
+ resp->ul_src_pipe_stats_list[pipe_len].
+ pipe_index);
+ IPAWANDBG_LOW("ul_p_v4(%lu)v6(%lu)\n",
+ (unsigned long int) resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv4_packets,
+ (unsigned long int) resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv6_packets);
+ IPAWANDBG_LOW("ul_b_v4(%lu)v6(%lu)\n",
+ (unsigned long int) resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv4_bytes,
+ (unsigned long int) resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv6_bytes);
+ if (ipa_get_client_uplink(resp->
+ ul_src_pipe_stats_list[pipe_len].
+ pipe_index) == true) {
+ if (data->ipa_client == ipa_get_client(resp->
+ ul_src_pipe_stats_list[pipe_len].
+ pipe_index)) {
+				/* update the UL stats */
+ data->ipv4_tx_packets += resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv4_packets;
+ data->ipv6_tx_packets += resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv6_packets;
+ data->ipv4_tx_bytes += resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv4_bytes;
+ data->ipv6_tx_bytes += resp->
+ ul_src_pipe_stats_list[pipe_len].
+ num_ipv6_bytes;
+ }
+ }
+ }
+ }
+ IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+ (unsigned long int) data->ipv4_tx_packets,
+ (unsigned long int) data->ipv6_tx_packets,
+ (unsigned long int) data->ipv4_tx_bytes,
+ (unsigned long int) data->ipv6_tx_bytes);
+ kfree(req);
+ kfree(resp);
+ return 0;
+}
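+
+/*
+ * Illustrative user space sketch (guarded out, same includes as the
+ * polling sketch above): querying the accumulated tethering stats for a
+ * client and printing the totals filled in above. The client enum value
+ * is hypothetical.
+ */
+#if 0
+static void query_stats_sketch(int fd)
+{
+	struct wan_ioctl_query_tether_stats stats;
+
+	memset(&stats, 0, sizeof(stats));
+	stats.ipa_client = IPA_CLIENT_USB_PROD;	/* hypothetical */
+	if (ioctl(fd, WAN_IOC_QUERY_TETHER_STATS, &stats) < 0)
+		return;
+	printf("rx bytes v4/v6: %llu/%llu, tx bytes v4/v6: %llu/%llu\n",
+		(unsigned long long)stats.ipv4_rx_bytes,
+		(unsigned long long)stats.ipv6_rx_bytes,
+		(unsigned long long)stats.ipv4_tx_bytes,
+		(unsigned long long)stats.ipv6_tx_bytes);
+}
+#endif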
+
+/**
+ * ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
+ * @mux_id - The MUX ID on which the quota has been reached
+ *
+ * This function broadcasts a Netlink event using the kobject of the
+ * rmnet_ipa interface in order to alert the user space that the quota
+ * on the specific interface which matches the mux_id has been reached.
+ *
+ */
+void ipa3_broadcast_quota_reach_ind(u32 mux_id)
+{
+ char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
+ char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+ char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+ char *envp[IPA_UEVENT_NUM_EVNP] = {
+ alert_msg, iface_name_l, iface_name_m, NULL };
+ int res;
+ int index;
+
+ index = ipa3_find_mux_channel_index(mux_id);
+
+ if (index == MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("%u is an mux ID\n", mux_id);
+ return;
+ }
+
+ res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
+ "ALERT_NAME=%s", "quotaReachedAlert");
+ if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
+ IPAWANERR("message too long (%d)", res);
+ return;
+ }
+ /* posting msg for L-release for CNE */
+ res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+ if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+ IPAWANERR("message too long (%d)", res);
+ return;
+ }
+ /* posting msg for M-release for CNE */
+ res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+ if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+ IPAWANERR("message too long (%d)", res);
+ return;
+ }
+
+ IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
+ alert_msg, iface_name_l, iface_name_m);
+ kobject_uevent_env(&(IPA_NETDEV()->dev.kobj),
+ KOBJ_CHANGE, envp);
+}
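+
+/*
+ * Illustrative user space sketch (guarded out): receiving the uevent
+ * broadcast above over a NETLINK_KOBJECT_UEVENT socket and scanning the
+ * NUL-separated environment for the quota alert. memmem() is a GNU
+ * extension.
+ */
+#if 0
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <linux/netlink.h>
+
+static void quota_uevent_listener(void)
+{
+	struct sockaddr_nl addr = {
+		.nl_family = AF_NETLINK,
+		.nl_groups = 1,	/* kernel uevent multicast group */
+	};
+	char buf[1024];
+	ssize_t len;
+	int sock = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
+
+	if (sock < 0 || bind(sock, (struct sockaddr *)&addr, sizeof(addr)))
+		return;
+	while ((len = recv(sock, buf, sizeof(buf), 0)) > 0) {
+		if (memmem(buf, len, "ALERT_NAME=quotaReachedAlert", 28))
+			printf("tethering quota reached\n");
+	}
+}
+#endif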
+
+/**
+ * ipa3_q6_handshake_complete() - Perform operations once Q6 is up
+ * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
+ *
+ * This function is invoked once the handshake between the IPA AP driver
+ * and IPA Q6 driver is complete. At this point, it is possible to perform
+ * operations which can't be performed until IPA Q6 driver is up.
+ *
+ */
+void ipa3_q6_handshake_complete(bool ssr_bootup)
+{
+ /* It is required to recover the network stats after SSR recovery */
+ if (ssr_bootup) {
+ /*
+ * In case the uC is required to be loaded by the Modem,
+ * the proxy vote will be removed only when uC loading is
+ * complete and indication is received by the AP. After SSR,
+ * uC is already loaded. Therefore, proxy vote can be removed
+ * once Modem init is complete.
+ */
+ ipa3_proxy_clk_unvote();
+
+ /*
+ * It is required to recover the network stats after
+ * SSR recovery
+ */
+ rmnet_ipa_get_network_stats_and_update();
+ }
+}
+
+static int __init ipa3_wwan_init(void)
+{
+ rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
+ if (!rmnet_ipa3_ctx) {
+ IPAWANERR("no memory\n");
+ return -ENOMEM;
+ }
+
+ atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
+ atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+
+ mutex_init(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
+ /* Register for Modem SSR */
+ rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
+ SUBSYS_MODEM,
+ &ipa3_ssr_notifier);
+	if (IS_ERR(rmnet_ipa3_ctx->subsys_notify_handle)) {
+		int rc = (int)PTR_ERR(rmnet_ipa3_ctx->subsys_notify_handle);
+
+		kfree(rmnet_ipa3_ctx);	/* do not leak the context */
+		rmnet_ipa3_ctx = NULL;
+		return rc;
+	}
+	return platform_driver_register(&rmnet_ipa_driver);
+}
+
+static void __exit ipa3_wwan_cleanup(void)
+{
+ int ret;
+
+ mutex_destroy(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ ret = subsys_notif_unregister_notifier(
+ rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
+ if (ret)
+ IPAWANERR(
+ "Error subsys_notif_unregister_notifier system %s, ret=%d\n",
+ SUBSYS_MODEM, ret);
+ platform_driver_unregister(&rmnet_ipa_driver);
+ kfree(rmnet_ipa3_ctx);
+ rmnet_ipa3_ctx = NULL;
+}
+
+static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+ if (!buff)
+ IPAWANERR("Null buffer.\n");
+ kfree(buff);
+}
+
+static void ipa3_rmnet_rx_cb(void *priv)
+{
+ IPAWANDBG_LOW("\n");
+ napi_schedule(&(rmnet_ipa3_ctx->wwan_priv->napi));
+}
+
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget)
+{
+ int rcvd_pkts = 0;
+
+ rcvd_pkts = ipa_rx_poll(rmnet_ipa3_ctx->ipa3_to_apps_hdl,
+ NAPI_WEIGHT);
+ IPAWANDBG_LOW("rcvd packets: %d\n", rcvd_pkts);
+ return rcvd_pkts;
+}
+
+late_initcall(ipa3_wwan_init);
+module_exit(ipa3_wwan_cleanup);
+MODULE_DESCRIPTION("WWAN Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
new file mode 100644
index 0000000..80b07ab
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -0,0 +1,391 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include "ipa_qmi_service.h"
+
+#define DRIVER_NAME "wwan_ioctl"
+
+#ifdef CONFIG_COMPAT
+#define WAN_IOC_ADD_FLT_RULE32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ADD_FLT_RULE, \
+ compat_uptr_t)
+#define WAN_IOC_ADD_FLT_RULE_INDEX32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ADD_FLT_INDEX, \
+ compat_uptr_t)
+#define WAN_IOC_POLL_TETHERING_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_POLL_TETHERING_STATS, \
+ compat_uptr_t)
+#define WAN_IOC_SET_DATA_QUOTA32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SET_DATA_QUOTA, \
+ compat_uptr_t)
+#define WAN_IOC_SET_TETHER_CLIENT_PIPE32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \
+ compat_uptr_t)
+#define WAN_IOC_QUERY_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_TETHER_STATS, \
+ compat_uptr_t)
+#define WAN_IOC_RESET_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_RESET_TETHER_STATS, \
+ compat_uptr_t)
+#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_DL_FILTER_STATS, \
+ compat_uptr_t)
+#endif
+
+static unsigned int dev_num = 1;
+static struct cdev ipa3_wan_ioctl_cdev;
+static unsigned int ipa3_process_ioctl = 1;
+static struct class *class;
+static dev_t device;
+
+static long ipa3_wan_ioctl(struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int retval = 0;
+ u32 pyld_sz;
+ u8 *param = NULL;
+
+ IPAWANDBG("device %s got ioctl events :>>>\n",
+ DRIVER_NAME);
+
+ if (!ipa3_process_ioctl) {
+ IPAWANDBG("modem is in SSR, ignoring ioctl\n");
+ return -EAGAIN;
+ }
+
+ switch (cmd) {
+ case WAN_IOC_ADD_FLT_RULE:
+ IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_qmi_filter_request_send(
+ (struct ipa_install_fltr_rule_req_msg_v01 *)param)) {
+ IPAWANDBG("IPACM->Q6 add filter rule failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_ADD_FLT_RULE_INDEX:
+ IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_qmi_filter_notify_send(
+ (struct ipa_fltr_installed_notif_req_msg_v01 *)param)) {
+ IPAWANDBG("IPACM->Q6 rule index fail\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_VOTE_FOR_BW_MBPS:
+ IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(uint32_t);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_vote_for_bus_bw((uint32_t *)param)) {
+ IPAWANERR("Failed to vote for bus BW\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_POLL_TETHERING_STATS:
+ IPAWANDBG_LOW("got WAN_IOCTL_POLL_TETHERING_STATS :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_poll_tethering_stats(
+ (struct wan_ioctl_poll_tethering_stats *)param)) {
+ IPAWANERR("WAN_IOCTL_POLL_TETHERING_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_SET_DATA_QUOTA:
+ IPAWANDBG_LOW("got WAN_IOCTL_SET_DATA_QUOTA :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_set_data_quota);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_set_data_quota(
+ (struct wan_ioctl_set_data_quota *)param)) {
+ IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_SET_TETHER_CLIENT_PIPE:
+ IPAWANDBG_LOW("got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_set_tether_client_pipe);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_set_tether_client_pipe(
+ (struct wan_ioctl_set_tether_client_pipe *)param)) {
+ IPAWANERR("WAN_IOC_SET_TETHER_CLIENT_PIPE failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_QUERY_TETHER_STATS:
+ IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_query_tether_stats);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (rmnet_ipa3_query_tethering_stats(
+ (struct wan_ioctl_query_tether_stats *)param, false)) {
+ IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_RESET_TETHER_STATS:
+ IPAWANDBG_LOW("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n",
+ DRIVER_NAME);
+ pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (rmnet_ipa3_query_tethering_stats(NULL, true)) {
+ IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ default:
+ retval = -ENOTTY;
+ }
+ kfree(param);
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+long ipa3_compat_wan_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case WAN_IOC_ADD_FLT_RULE32:
+ cmd = WAN_IOC_ADD_FLT_RULE;
+ break;
+ case WAN_IOC_ADD_FLT_RULE_INDEX32:
+ cmd = WAN_IOC_ADD_FLT_RULE_INDEX;
+ break;
+ case WAN_IOC_POLL_TETHERING_STATS32:
+ cmd = WAN_IOC_POLL_TETHERING_STATS;
+ break;
+ case WAN_IOC_SET_DATA_QUOTA32:
+ cmd = WAN_IOC_SET_DATA_QUOTA;
+ break;
+ case WAN_IOC_SET_TETHER_CLIENT_PIPE32:
+ cmd = WAN_IOC_SET_TETHER_CLIENT_PIPE;
+ break;
+ case WAN_IOC_QUERY_TETHER_STATS32:
+ cmd = WAN_IOC_QUERY_TETHER_STATS;
+ break;
+ case WAN_IOC_RESET_TETHER_STATS32:
+ cmd = WAN_IOC_RESET_TETHER_STATS;
+ break;
+ case WAN_IOC_QUERY_DL_FILTER_STATS32:
+ cmd = WAN_IOC_QUERY_DL_FILTER_STATS;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return ipa3_wan_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static int ipa3_wan_ioctl_open(struct inode *inode, struct file *filp)
+{
+ IPAWANDBG("\n IPA A7 ipa3_wan_ioctl open OK :>>>> ");
+ return 0;
+}
+
+const struct file_operations rmnet_ipa3_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa3_wan_ioctl_open,
+ .read = NULL,
+ .unlocked_ioctl = ipa3_wan_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ipa3_compat_wan_ioctl,
+#endif
+};
+
+int ipa3_wan_ioctl_init(void)
+{
+ unsigned int wan_ioctl_major = 0;
+ int ret;
+ struct device *dev;
+
+ device = MKDEV(wan_ioctl_major, 0);
+
+ ret = alloc_chrdev_region(&device, 0, dev_num, DRIVER_NAME);
+ if (ret) {
+ IPAWANERR(":device_alloc err.\n");
+ goto dev_alloc_err;
+ }
+ wan_ioctl_major = MAJOR(device);
+
+ class = class_create(THIS_MODULE, DRIVER_NAME);
+ if (IS_ERR(class)) {
+ IPAWANERR(":class_create err.\n");
+ goto class_err;
+ }
+
+ dev = device_create(class, NULL, device,
+ NULL, DRIVER_NAME);
+ if (IS_ERR(dev)) {
+ IPAWANERR(":device_create err.\n");
+ goto device_err;
+ }
+
+ cdev_init(&ipa3_wan_ioctl_cdev, &rmnet_ipa3_fops);
+ ret = cdev_add(&ipa3_wan_ioctl_cdev, device, dev_num);
+ if (ret) {
+ IPAWANERR(":cdev_add err.\n");
+ goto cdev_add_err;
+ }
+
+ ipa3_process_ioctl = 1;
+
+ IPAWANDBG("IPA %s major(%d) initial ok :>>>>\n",
+ DRIVER_NAME, wan_ioctl_major);
+ return 0;
+
+cdev_add_err:
+ device_destroy(class, device);
+device_err:
+ class_destroy(class);
+class_err:
+ unregister_chrdev_region(device, dev_num);
+dev_alloc_err:
+ return -ENODEV;
+}
+
+void ipa3_wan_ioctl_stop_qmi_messages(void)
+{
+ ipa3_process_ioctl = 0;
+}
+
+void ipa3_wan_ioctl_enable_qmi_messages(void)
+{
+ ipa3_process_ioctl = 1;
+}
+
+void ipa3_wan_ioctl_deinit(void)
+{
+ cdev_del(&ipa3_wan_ioctl_cdev);
+ device_destroy(class, device);
+ class_destroy(class);
+ unregister_chrdev_region(device, dev_num);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
new file mode 100644
index 0000000..3ed3e44
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -0,0 +1,253 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipa.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge"
+
+#define TETH_DBG(fmt, args...) \
+ pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args)
+#define TETH_DBG_FUNC_ENTRY() \
+ pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__)
+#define TETH_DBG_FUNC_EXIT() \
+ pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
+#define TETH_ERR(fmt, args...) \
+ pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+/**
+ * struct ipa3_teth_bridge_ctx - Tethering bridge driver context information
+ * @class: kernel class pointer
+ * @dev_num: kernel device number
+ * @dev: kernel device struct pointer
+ * @cdev: kernel character device struct
+ */
+struct ipa3_teth_bridge_ctx {
+ struct class *class;
+ dev_t dev_num;
+ struct device *dev;
+ struct cdev cdev;
+};
+static struct ipa3_teth_bridge_ctx *ipa3_teth_ctx;
+
+/**
+* teth_bridge_ipa_cb() - Callback to handle IPA data path events
+* @priv - private data
+* @evt - event type
+* @data - event specific data (usually skb)
+*
+* This callback is called by the IPA driver for exception packets from USB.
+* All exception packets are handled by Q6 and should not reach this function.
+* Packets arrive at the AP exception pipe only when they are sent from USB
+* before Q6 has set up the call.
+*/
+static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+
+ TETH_DBG_FUNC_ENTRY();
+ if (evt != IPA_RECEIVE) {
+ TETH_ERR("unexpected event %d\n", evt);
+ WARN_ON(1);
+ return;
+ }
+
+ TETH_ERR("Unexpected exception packet from USB, dropping packet\n");
+ dev_kfree_skb_any(skb);
+ TETH_DBG_FUNC_EXIT();
+}
+
+/**
+* ipa3_teth_bridge_init() - Initialize the Tethering bridge driver
+* @params - in/out params for USB initialization API (please look at struct
+* definition for more info)
+*
+* USB driver gets a pointer to a callback function (usb_notify_cb) and an
+* associated data. USB driver installs this callback function in the call to
+* ipa3_connect().
+*
+* Builds IPA resource manager dependency graph.
+*
+* Return codes: 0: success,
+* -EINVAL - Bad parameter
+* Other negative value - Failure
+*/
+int ipa3_teth_bridge_init(struct teth_bridge_init_params *params)
+{
+ TETH_DBG_FUNC_ENTRY();
+
+ if (!params) {
+ TETH_ERR("Bad parameter\n");
+ TETH_DBG_FUNC_EXIT();
+ return -EINVAL;
+ }
+
+ params->usb_notify_cb = teth_bridge_ipa_cb;
+ params->private_data = NULL;
+ params->skip_ep_cfg = true;
+
+ TETH_DBG_FUNC_EXIT();
+ return 0;
+}
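+
+/*
+ * Illustrative sketch (guarded out): the expected calling sequence from a
+ * USB function driver. It initializes the bridge to obtain the exception
+ * callback, installs that callback when setting up its IPA pipes (not
+ * shown), and connects the bridge once the pipes are up. Error handling
+ * and the connect_params fields are omitted.
+ */
+#if 0
+static int usb_side_teth_sketch(void)
+{
+	struct teth_bridge_init_params init_params;
+	struct teth_bridge_connect_params connect_params;
+
+	memset(&init_params, 0, sizeof(init_params));
+	ipa3_teth_bridge_init(&init_params);
+	/* init_params.usb_notify_cb is then passed to ipa3_connect() */
+
+	memset(&connect_params, 0, sizeof(connect_params));
+	/* fill connection info here */
+	return ipa3_teth_bridge_connect(&connect_params);
+}
+#endif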
+
+/**
+* ipa3_teth_bridge_disconnect() - Disconnect tethering bridge module
+*/
+int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
+{
+ TETH_DBG_FUNC_ENTRY();
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ TETH_DBG_FUNC_EXIT();
+
+ return 0;
+}
+
+/**
+* ipa3_teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+* @connect_params: Connection info
+*
+* Return codes: 0: success
+* -EINVAL: invalid parameters
+* -EPERM: Operation not permitted as the bridge is already
+* connected
+*/
+int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+ int res = 0;
+
+ TETH_DBG_FUNC_ENTRY();
+
+ /* Build the dependency graph, first add_dependency call is sync
+ * in order to make sure the IPA clocks are up before we continue
+ * and notify the USB driver it may continue.
+ */
+ res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res < 0) {
+ TETH_ERR("ipa_rm_add_dependency() failed.\n");
+ goto bail;
+ }
+
+ /* this add_dependency call can't be sync since it will block until USB
+ * status is connected (which can happen only after the tethering
+ * bridge is connected), the clocks are already up so the call doesn't
+ * need to block.
+ */
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ if (res < 0 && res != -EINPROGRESS) {
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ TETH_ERR("ipa_rm_add_dependency() failed.\n");
+ goto bail;
+ }
+
+ res = 0;
+
+bail:
+ TETH_DBG_FUNC_EXIT();
+ return res;
+}
+
+static long ipa3_teth_bridge_ioctl(struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ IPAERR("No ioctls are supported!\n");
+ return -ENOIOCTLCMD;
+}
+
+static const struct file_operations ipa3_teth_bridge_drv_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ipa3_teth_bridge_ioctl,
+};
+
+/**
+* ipa3_teth_bridge_driver_init() - Initialize tethering bridge driver
+*
+*/
+int ipa3_teth_bridge_driver_init(void)
+{
+ int res;
+
+ TETH_DBG("Tethering bridge driver init\n");
+ ipa3_teth_ctx = kzalloc(sizeof(*ipa3_teth_ctx), GFP_KERNEL);
+ if (!ipa3_teth_ctx) {
+ TETH_ERR("kzalloc err.\n");
+ return -ENOMEM;
+ }
+
+	ipa3_teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME);
+	if (IS_ERR(ipa3_teth_ctx->class)) {
+		TETH_ERR(":class_create err.\n");
+		res = -ENODEV;
+		goto fail_class_create;
+	}
+
+	res = alloc_chrdev_region(&ipa3_teth_ctx->dev_num, 0, 1,
+		TETH_BRIDGE_DRV_NAME);
+	if (res) {
+		TETH_ERR("alloc_chrdev_region err.\n");
+		res = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+ ipa3_teth_ctx->dev = device_create(ipa3_teth_ctx->class,
+ NULL,
+ ipa3_teth_ctx->dev_num,
+ ipa3_teth_ctx,
+ TETH_BRIDGE_DRV_NAME);
+ if (IS_ERR(ipa3_teth_ctx->dev)) {
+ TETH_ERR(":device_create err.\n");
+ res = -ENODEV;
+ goto fail_device_create;
+ }
+
+ cdev_init(&ipa3_teth_ctx->cdev, &ipa3_teth_bridge_drv_fops);
+ ipa3_teth_ctx->cdev.owner = THIS_MODULE;
+ ipa3_teth_ctx->cdev.ops = &ipa3_teth_bridge_drv_fops;
+
+ res = cdev_add(&ipa3_teth_ctx->cdev, ipa3_teth_ctx->dev_num, 1);
+ if (res) {
+ TETH_ERR(":cdev_add err=%d\n", -res);
+ res = -ENODEV;
+ goto fail_cdev_add;
+ }
+ TETH_DBG("Tethering bridge driver init OK\n");
+
+ return 0;
+fail_cdev_add:
+	device_destroy(ipa3_teth_ctx->class, ipa3_teth_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(ipa3_teth_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	class_destroy(ipa3_teth_ctx->class);
+fail_class_create:
+	kfree(ipa3_teth_ctx);
+	ipa3_teth_ctx = NULL;
+
+ return res;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Tethering bridge driver");
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
new file mode 100644
index 0000000..e1686e6
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_example.c b/drivers/platform/msm/ipa/test/ipa_test_example.c
new file mode 100644
index 0000000..0313375
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_example.c
@@ -0,0 +1,99 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_ut_framework.h"
+
+/**
+ * Example IPA Unit-test suite
+ * To be a reference for writing new suites and tests.
+ * This suite is also used as unit-test for the testing framework itself.
+ * Structure:
+ * 1- Define the setup and teardown functions
+ * Not Mandatory. Null may be used as well
+ * 2- For each test, define its Run() function
+ * 3- Use IPA_UT_DEFINE_SUITE_START() to start defining the suite
+ * 4- use IPA_UT_ADD_TEST() for adding tests within
+ * the suite definition block
+ * 5- IPA_UT_DEFINE_SUITE_END() close the suite definition
+ */
+
+static int ipa_test_example_dummy;
+
+static int ipa_test_example_suite_setup(void **ppriv)
+{
+ IPA_UT_DBG("Start Setup - set 0x1234F\n");
+
+ ipa_test_example_dummy = 0x1234F;
+ *ppriv = (void *)&ipa_test_example_dummy;
+
+ return 0;
+}
+
+static int ipa_test_example_teardown(void *priv)
+{
+ IPA_UT_DBG("Start Teardown\n");
+ IPA_UT_DBG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+
+ return 0;
+}
+
+static int ipa_test_example_test1(void *priv)
+{
+ IPA_UT_LOG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+ ipa_test_example_dummy++;
+
+ return 0;
+}
+
+static int ipa_test_example_test2(void *priv)
+{
+ IPA_UT_LOG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+ ipa_test_example_dummy++;
+
+ return 0;
+}
+
+static int ipa_test_example_test3(void *priv)
+{
+ IPA_UT_LOG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+ ipa_test_example_dummy++;
+
+ return 0;
+}
+
+static int ipa_test_example_test4(void *priv)
+{
+ IPA_UT_LOG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+ ipa_test_example_dummy++;
+
+ IPA_UT_TEST_FAIL_REPORT("failed on test");
+
+ return -EFAULT;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(example, "Example suite",
+ ipa_test_example_suite_setup, ipa_test_example_teardown)
+{
+ IPA_UT_ADD_TEST(test1, "This is test number 1",
+ ipa_test_example_test1, false, IPA_HW_v1_0, IPA_HW_MAX),
+
+ IPA_UT_ADD_TEST(test2, "This is test number 2",
+ ipa_test_example_test2, false, IPA_HW_v1_0, IPA_HW_MAX),
+
+ IPA_UT_ADD_TEST(test3, "This is test number 3",
+ ipa_test_example_test3, false, IPA_HW_v1_1, IPA_HW_v2_6),
+
+ IPA_UT_ADD_TEST(test4, "This is test number 4",
+ ipa_test_example_test4, false, IPA_HW_v1_1, IPA_HW_MAX),
+
+} IPA_UT_DEFINE_SUITE_END(example);
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
new file mode 100644
index 0000000..5a41d64
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -0,0 +1,3306 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/ipa_mhi.h>
+#include <linux/ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "../../gsi/gsi.h"
+#include "../../gsi/gsi_reg.h"
+#include "ipa_ut_framework.h"
+
+#define IPA_MHI_TEST_NUM_CHANNELS 8
+#define IPA_MHI_TEST_NUM_EVENT_RINGS 8
+#define IPA_MHI_TEST_FIRST_CHANNEL_ID 100
+#define IPA_MHI_TEST_FIRST_EVENT_RING_ID 100
+#define IPA_MHI_TEST_LAST_CHANNEL_ID \
+ (IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS - 1)
+#define IPA_MHI_TEST_LAST_EVENT_RING_ID \
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID + IPA_MHI_TEST_NUM_EVENT_RINGS - 1)
+#define IPA_MHI_TEST_MAX_DATA_BUF_SIZE 1500
+#define IPA_MHI_TEST_SEQ_TYPE_DMA 0x00000000
+
+#define IPA_MHI_TEST_LOOP_NUM 5
+#define IPA_MHI_RUN_TEST_UNIT_IN_LOOP(test_unit, rc, args...) \
+ do { \
+ int __i; \
+ for (__i = 0; __i < IPA_MHI_TEST_LOOP_NUM; __i++) { \
+ IPA_UT_LOG(#test_unit " START iter %d\n", __i); \
+ rc = test_unit(args); \
+ if (!rc) \
+ continue; \
+ IPA_UT_LOG(#test_unit " failed %d\n", rc); \
+ break; \
+ } \
+ } while (0)
+
+/**
+ * Check for the MSI interrupt on one or both channels:
+ * the OUT channel MSI may be missed as it
+ * will be overwritten by the IN channel MSI
+ */
+#define IPA_MHI_TEST_CHECK_MSI_INTR(__both, __timeout) \
+ do { \
+ int i; \
+ for (i = 0; i < 20; i++) { \
+ if (*((u32 *)test_mhi_ctx->msi.base) == \
+ (0x10000000 | \
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1))) { \
+ __timeout = false; \
+ break; \
+ } \
+ if (__both && (*((u32 *)test_mhi_ctx->msi.base) == \
+ (0x10000000 | \
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID)))) { \
+ /* sleep to be sure IN MSI is generated */ \
+ msleep(20); \
+ __timeout = false; \
+ break; \
+ } \
+ msleep(20); \
+ } \
+ } while (0)
+
+static DECLARE_COMPLETION(mhi_test_ready_comp);
+static DECLARE_COMPLETION(mhi_test_wakeup_comp);
+
+/**
+ * enum ipa_mhi_ring_elements_type - MHI ring elements types.
+ */
+enum ipa_mhi_ring_elements_type {
+ IPA_MHI_RING_ELEMENT_NO_OP = 1,
+ IPA_MHI_RING_ELEMENT_TRANSFER = 2
+};
+
+/**
+ * enum ipa_mhi_channel_direction - MHI channel directions
+ */
+enum ipa_mhi_channel_direction {
+ IPA_MHI_OUT_CHAHNNEL = 1,
+ IPA_MHI_IN_CHAHNNEL = 2,
+};
+
+/**
+ * struct ipa_mhi_channel_context_array - MHI Channel context array entry
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_channel_context_array {
+ u32 chstate:8; /*0-7*/
+ u32 brsmode:2; /*8-9*/
+ u32 pollcfg:6; /*10-15*/
+ u32 reserved:16; /*16-31*/
+ u32 chtype; /*channel type (inbound/outbound)*/
+ u32 erindex; /*event ring index*/
+ u64 rbase; /*ring base address in the host addr spc*/
+ u64 rlen; /*ring length in bytes*/
+ u64 rp; /*read pointer in the host system addr spc*/
+ u64 wp; /*write pointer in the host system addr spc*/
+} __packed;
+
+/**
+ * struct ipa_mhi_event_context_array - MHI event ring context array entry
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_event_context_array {
+ u16 intmodc;
+ u16 intmodt;/* Interrupt moderation timer (in microseconds) */
+ u32 ertype;
+ u32 msivec; /* MSI vector for interrupt (MSI data)*/
+ u64 rbase; /* ring base address in host address space*/
+ u64 rlen; /* ring length in bytes*/
+ u64 rp; /* read pointer in the host system address space*/
+ u64 wp; /* write pointer in the host system address space*/
+} __packed;
+
+/**
+ *
+ * struct ipa_mhi_mmio_register_set - MHI configuration registers,
+ * control registers, status registers, pointers to doorbell arrays,
+ * pointers to channel and event context arrays.
+ *
+ * The structure is defined in mhi spec (register names are taken from there).
+ * Only values accessed by HWP or test are documented
+ */
+struct ipa_mhi_mmio_register_set {
+ u32 mhireglen;
+ u32 reserved_08_04;
+ u32 mhiver;
+ u32 reserved_10_0c;
+ struct mhicfg {
+ u8 nch;
+ u8 reserved_15_8;
+ u8 ner;
+ u8 reserved_31_23;
+ } __packed mhicfg;
+
+ u32 reserved_18_14;
+ u32 chdboff;
+ u32 reserved_20_1C;
+ u32 erdboff;
+ u32 reserved_28_24;
+ u32 bhioff;
+ u32 reserved_30_2C;
+ u32 debugoff;
+ u32 reserved_38_34;
+
+ struct mhictrl {
+ u32 rs : 1;
+ u32 reset : 1;
+ u32 reserved_7_2 : 6;
+ u32 mhistate : 8;
+ u32 reserved_31_16 : 16;
+ } __packed mhictrl;
+
+ u64 reserved_40_3c;
+ u32 reserved_44_40;
+
+ struct mhistatus {
+ u32 ready : 1;
+ u32 reserved_3_2 : 1;
+ u32 syserr : 1;
+ u32 reserved_7_3 : 5;
+ u32 mhistate : 8;
+ u32 reserved_31_16 : 16;
+ } __packed mhistatus;
+
+ /**
+ * Register is not accessed by HWP.
+ * In test register carries the handle for
+ * the buffer of channel context array
+ */
+ u32 reserved_50_4c;
+
+ u32 mhierror;
+
+ /**
+ * Register is not accessed by HWP.
+ * In test register carries the handle for
+ * the buffer of event ring context array
+ */
+ u32 reserved_58_54;
+
+ /**
+ * 64-bit pointer to the channel context array in the host memory space
+ * host sets the pointer to the channel context array during
+ * initialization.
+ */
+ u64 ccabap;
+ /**
+ * 64-bit pointer to the event context array in the host memory space
+ * host sets the pointer to the event context array during
+ * initialization
+ */
+ u64 ecabap;
+ /**
+ * Register is not accessed by HWP.
+ * In test register carries the pointer of virtual address
+ * for the buffer of channel context array
+ */
+ u64 crcbap;
+ /**
+ * Register is not accessed by HWP.
+ * In test register carries the pointer of virtual address
+ * for the buffer of event ring context array
+ */
+ u64 crdb;
+
+ u64 reserved_80_78;
+
+ struct mhiaddr {
+ /**
+ * Base address (64-bit) of the memory region in
+ * the host address space where the MHI control
+ * data structures are allocated by the host,
+ * including channel context array, event context array,
+ * and rings.
+ * The device uses this information to set up its internal
+ * address translation tables.
+ * value must be aligned to 4 Kbytes.
+ */
+ u64 mhicrtlbase;
+ /**
+ * Upper limit address (64-bit) of the memory region in
+ * the host address space where the MHI control
+ * data structures are allocated by the host.
+ * The device uses this information to setup its internal
+ * address translation tables.
+ * The most significant 32 bits of MHICTRLBASE and
+ * MHICTRLLIMIT registers must be equal.
+ */
+ u64 mhictrllimit;
+ u64 reserved_18_10;
+ /**
+ * Base address (64-bit) of the memory region in
+ * the host address space where the MHI data buffers
+ * are allocated by the host.
+ * The device uses this information to setup its
+ * internal address translation tables.
+ * value must be aligned to 4 Kbytes.
+ */
+ u64 mhidatabase;
+ /**
+ * Upper limit address (64-bit) of the memory region in
+ * the host address space where the MHI data buffers
+ * are allocated by the host.
+ * The device uses this information to setup its
+ * internal address translation tables.
+ * The most significant 32 bits of MHIDATABASE and
+ * MHIDATALIMIT registers must be equal.
+ */
+ u64 mhidatalimit;
+ u64 reserved_30_28;
+ } __packed mhiaddr;
+
+} __packed;
+
+/**
+ * struct ipa_mhi_event_ring_element - MHI Event ring element
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_event_ring_element {
+ /**
+ * pointer to ring element that generated event in
+ * the host system address space
+ */
+ u64 ptr;
+ union {
+ struct {
+ u32 len : 24;
+ u32 code : 8;
+ } __packed bits;
+ u32 dword;
+ } __packed dword_8;
+ u16 reserved;
+ u8 type;
+ u8 chid;
+} __packed;
+
+/**
+* struct ipa_mhi_transfer_ring_element - MHI Transfer ring element
+*
+* mapping is taken from MHI spec
+*/
+struct ipa_mhi_transfer_ring_element {
+ u64 ptr; /*pointer to buffer in the host system address space*/
+ u16 len; /*transaction length in bytes*/
+ u16 reserved0;
+ union {
+ struct {
+ u16 chain : 1;
+ u16 reserved_7_1 : 7;
+ u16 ieob : 1;
+ u16 ieot : 1;
+ u16 bei : 1;
+ u16 reserved_15_11 : 5;
+ } __packed bits;
+ u16 word;
+ } __packed word_C;
+ u8 type;
+ u8 reserved1;
+} __packed;
+
+/**
+ * struct ipa_test_mhi_context - MHI test context
+ */
+struct ipa_test_mhi_context {
+ void __iomem *gsi_mmio;
+ struct ipa_mem_buffer msi;
+ struct ipa_mem_buffer ch_ctx_array;
+ struct ipa_mem_buffer ev_ctx_array;
+ struct ipa_mem_buffer mmio_buf;
+ struct ipa_mem_buffer xfer_ring_bufs[IPA_MHI_TEST_NUM_CHANNELS];
+ struct ipa_mem_buffer ev_ring_bufs[IPA_MHI_TEST_NUM_EVENT_RINGS];
+ struct ipa_mem_buffer in_buffer;
+ struct ipa_mem_buffer out_buffer;
+ u32 prod_hdl;
+ u32 cons_hdl;
+};
+
+static struct ipa_test_mhi_context *test_mhi_ctx;
+
+static void ipa_mhi_test_cb(void *priv,
+ enum ipa_mhi_event_type event, unsigned long data)
+{
+ IPA_UT_DBG("Entry\n");
+
+ if (event == IPA_MHI_EVENT_DATA_AVAILABLE)
+ complete_all(&mhi_test_wakeup_comp);
+ else if (event == IPA_MHI_EVENT_READY)
+ complete_all(&mhi_test_ready_comp);
+ else
+ WARN_ON(1);
+}
+
+static void ipa_test_mhi_free_mmio_space(void)
+{
+ IPA_UT_DBG("Entry\n");
+
+ if (!test_mhi_ctx)
+ return;
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->mmio_buf.size,
+ test_mhi_ctx->mmio_buf.base,
+ test_mhi_ctx->mmio_buf.phys_base);
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ev_ctx_array.size,
+ test_mhi_ctx->ev_ctx_array.base,
+ test_mhi_ctx->ev_ctx_array.phys_base);
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ch_ctx_array.size,
+ test_mhi_ctx->ch_ctx_array.base,
+ test_mhi_ctx->ch_ctx_array.phys_base);
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->msi.size,
+ test_mhi_ctx->msi.base, test_mhi_ctx->msi.phys_base);
+}
+
+static int ipa_test_mhi_alloc_mmio_space(void)
+{
+ int rc = 0;
+ struct ipa_mem_buffer *msi;
+ struct ipa_mem_buffer *ch_ctx_array;
+ struct ipa_mem_buffer *ev_ctx_array;
+ struct ipa_mem_buffer *mmio_buf;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+
+ IPA_UT_DBG("Entry\n");
+
+ msi = &test_mhi_ctx->msi;
+ ch_ctx_array = &test_mhi_ctx->ch_ctx_array;
+ ev_ctx_array = &test_mhi_ctx->ev_ctx_array;
+ mmio_buf = &test_mhi_ctx->mmio_buf;
+
+ /* Allocate MSI */
+ msi->size = 4;
+ msi->base = dma_alloc_coherent(ipa3_ctx->pdev, msi->size,
+ &msi->phys_base, GFP_KERNEL);
+ if (!msi->base) {
+ IPA_UT_ERR("no mem for msi\n");
+ return -ENOMEM;
+ }
+
+ IPA_UT_DBG("msi: base 0x%pK phys_addr 0x%pad size %d\n",
+ msi->base, &msi->phys_base, msi->size);
+
+ /* allocate buffer for channel context */
+ ch_ctx_array->size = sizeof(struct ipa_mhi_channel_context_array) *
+ IPA_MHI_TEST_NUM_CHANNELS;
+ ch_ctx_array->base = dma_alloc_coherent(ipa3_ctx->pdev,
+ ch_ctx_array->size, &ch_ctx_array->phys_base, GFP_KERNEL);
+ if (!ch_ctx_array->base) {
+ IPA_UT_ERR("no mem for ch ctx array\n");
+ rc = -ENOMEM;
+ goto fail_free_msi;
+ }
+ IPA_UT_DBG("channel ctx array: base 0x%pK phys_addr %pad size %d\n",
+ ch_ctx_array->base, &ch_ctx_array->phys_base,
+ ch_ctx_array->size);
+
+ /* allocate buffer for event context */
+ ev_ctx_array->size = sizeof(struct ipa_mhi_event_context_array) *
+ IPA_MHI_TEST_NUM_EVENT_RINGS;
+ ev_ctx_array->base = dma_alloc_coherent(ipa3_ctx->pdev,
+ ev_ctx_array->size, &ev_ctx_array->phys_base, GFP_KERNEL);
+ if (!ev_ctx_array->base) {
+ IPA_UT_ERR("no mem for ev ctx array\n");
+ rc = -ENOMEM;
+ goto fail_free_ch_ctx_arr;
+ }
+ IPA_UT_DBG("event ctx array: base 0x%pK phys_addr %pad size %d\n",
+ ev_ctx_array->base, &ev_ctx_array->phys_base,
+ ev_ctx_array->size);
+
+ /* allocate buffer for mmio */
+ mmio_buf->size = sizeof(struct ipa_mhi_mmio_register_set);
+ mmio_buf->base = dma_alloc_coherent(ipa3_ctx->pdev, mmio_buf->size,
+ &mmio_buf->phys_base, GFP_KERNEL);
+ if (!mmio_buf->base) {
+ IPA_UT_ERR("no mem for mmio buf\n");
+ rc = -ENOMEM;
+ goto fail_free_ev_ctx_arr;
+ }
+ IPA_UT_DBG("mmio buffer: base 0x%pK phys_addr %pad size %d\n",
+ mmio_buf->base, &mmio_buf->phys_base, mmio_buf->size);
+
+	/* initialize the MMIO register table */
+ p_mmio = (struct ipa_mhi_mmio_register_set *)mmio_buf->base;
+
+ /**
+ * 64-bit pointer to the channel context array in the host memory space;
+ * Host sets the pointer to the channel context array
+ * during initialization.
+ */
+ p_mmio->ccabap = (u32)ch_ctx_array->phys_base -
+ (IPA_MHI_TEST_FIRST_CHANNEL_ID *
+ sizeof(struct ipa_mhi_channel_context_array));
+ IPA_UT_DBG("pMmio->ccabap 0x%llx\n", p_mmio->ccabap);
+
+ /**
+ * 64-bit pointer to the event context array in the host memory space;
+ * Host sets the pointer to the event context array
+ * during initialization
+ */
+ p_mmio->ecabap = (u32)ev_ctx_array->phys_base -
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID *
+ sizeof(struct ipa_mhi_event_context_array));
+ IPA_UT_DBG("pMmio->ecabap 0x%llx\n", p_mmio->ecabap);
+
+ /**
+ * Register is not accessed by HWP.
+ * In test register carries the pointer of
+ * virtual address for the buffer of channel context array
+ */
+ p_mmio->crcbap = (unsigned long)ch_ctx_array->base;
+
+ /**
+ * Register is not accessed by HWP.
+ * In test register carries the pointer of
+	 * virtual address for the buffer of event ring context array
+ */
+ p_mmio->crdb = (unsigned long)ev_ctx_array->base;
+
+ /* test is running only on device. no need to translate addresses */
+ p_mmio->mhiaddr.mhicrtlbase = 0x04;
+ p_mmio->mhiaddr.mhictrllimit = 0xFFFFFFFF;
+ p_mmio->mhiaddr.mhidatabase = 0x04;
+ p_mmio->mhiaddr.mhidatalimit = 0xFFFFFFFF;
+
+ return rc;
+
+fail_free_ev_ctx_arr:
+ dma_free_coherent(ipa3_ctx->pdev, ev_ctx_array->size,
+ ev_ctx_array->base, ev_ctx_array->phys_base);
+ ev_ctx_array->base = NULL;
+fail_free_ch_ctx_arr:
+ dma_free_coherent(ipa3_ctx->pdev, ch_ctx_array->size,
+ ch_ctx_array->base, ch_ctx_array->phys_base);
+ ch_ctx_array->base = NULL;
+fail_free_msi:
+ dma_free_coherent(ipa3_ctx->pdev, msi->size, msi->base,
+ msi->phys_base);
+ msi->base = NULL;
+ return rc;
+}
+
+static void ipa_mhi_test_destroy_channel_context(
+ struct ipa_mem_buffer transfer_ring_bufs[],
+ struct ipa_mem_buffer event_ring_bufs[],
+ u8 channel_id,
+ u8 event_ring_id)
+{
+ u32 ev_ring_idx;
+ u32 ch_idx;
+
+ IPA_UT_DBG("Entry\n");
+
+ if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) ||
+ (channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) {
+ IPA_UT_ERR("channal_id invalid %d\n", channel_id);
+ return;
+ }
+
+ if ((event_ring_id < IPA_MHI_TEST_FIRST_EVENT_RING_ID) ||
+ (event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) {
+ IPA_UT_ERR("event_ring_id invalid %d\n", event_ring_id);
+ return;
+ }
+
+ ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+
+ if (transfer_ring_bufs[ch_idx].base) {
+ dma_free_coherent(ipa3_ctx->pdev,
+ transfer_ring_bufs[ch_idx].size,
+ transfer_ring_bufs[ch_idx].base,
+ transfer_ring_bufs[ch_idx].phys_base);
+ transfer_ring_bufs[ch_idx].base = NULL;
+ }
+
+ if (event_ring_bufs[ev_ring_idx].base) {
+ dma_free_coherent(ipa3_ctx->pdev,
+ event_ring_bufs[ev_ring_idx].size,
+ event_ring_bufs[ev_ring_idx].base,
+ event_ring_bufs[ev_ring_idx].phys_base);
+ event_ring_bufs[ev_ring_idx].base = NULL;
+ }
+}
+
+static int ipa_mhi_test_config_channel_context(
+ struct ipa_mem_buffer *mmio,
+ struct ipa_mem_buffer transfer_ring_bufs[],
+ struct ipa_mem_buffer event_ring_bufs[],
+ u8 channel_id,
+ u8 event_ring_id,
+ u16 transfer_ring_size,
+ u16 event_ring_size,
+ u8 ch_type)
+{
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_channels;
+ struct ipa_mhi_event_context_array *p_events;
+ u32 ev_ring_idx;
+ u32 ch_idx;
+
+ IPA_UT_DBG("Entry\n");
+
+ if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) ||
+ (channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) {
+ IPA_UT_DBG("channal_id invalid %d\n", channel_id);
+ return -EFAULT;
+ }
+
+ if ((event_ring_id < IPA_MHI_TEST_FIRST_EVENT_RING_ID) ||
+ (event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) {
+ IPA_UT_DBG("event_ring_id invalid %d\n", event_ring_id);
+ return -EFAULT;
+ }
+
+ p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base;
+ p_channels =
+ (struct ipa_mhi_channel_context_array *)
+ ((unsigned long)p_mmio->crcbap);
+ p_events = (struct ipa_mhi_event_context_array *)
+ ((unsigned long)p_mmio->crdb);
+
+ IPA_UT_DBG("p_mmio: %pK p_channels: %pK p_events: %pK\n",
+ p_mmio, p_channels, p_events);
+
+ ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+
+ IPA_UT_DBG("ch_idx: %u ev_ring_idx: %u\n", ch_idx, ev_ring_idx);
+ if (transfer_ring_bufs[ch_idx].base) {
+ IPA_UT_ERR("ChannelId %d is already allocated\n", channel_id);
+ return -EFAULT;
+ }
+
+ /* allocate and init event ring if needed */
+ if (!event_ring_bufs[ev_ring_idx].base) {
+ IPA_UT_LOG("Configuring event ring...\n");
+ event_ring_bufs[ev_ring_idx].size =
+ event_ring_size *
+ sizeof(struct ipa_mhi_event_ring_element);
+ event_ring_bufs[ev_ring_idx].base =
+ dma_alloc_coherent(ipa3_ctx->pdev,
+ event_ring_bufs[ev_ring_idx].size,
+ &event_ring_bufs[ev_ring_idx].phys_base,
+ GFP_KERNEL);
+ if (!event_ring_bufs[ev_ring_idx].base) {
+ IPA_UT_ERR("no mem for ev ring buf\n");
+ return -ENOMEM;
+ }
+ p_events[ev_ring_idx].intmodc = 1;
+ p_events[ev_ring_idx].intmodt = 0;
+ p_events[ev_ring_idx].msivec = event_ring_id;
+ p_events[ev_ring_idx].rbase =
+ (u32)event_ring_bufs[ev_ring_idx].phys_base;
+ p_events[ev_ring_idx].rlen =
+ event_ring_bufs[ev_ring_idx].size;
+ p_events[ev_ring_idx].rp =
+ (u32)event_ring_bufs[ev_ring_idx].phys_base;
+ p_events[ev_ring_idx].wp =
+ (u32)event_ring_bufs[ev_ring_idx].phys_base;
+ } else {
+ IPA_UT_LOG("Skip configuring event ring - already done\n");
+ }
+
+ transfer_ring_bufs[ch_idx].size =
+ transfer_ring_size *
+ sizeof(struct ipa_mhi_transfer_ring_element);
+ transfer_ring_bufs[ch_idx].base =
+ dma_alloc_coherent(ipa3_ctx->pdev,
+ transfer_ring_bufs[ch_idx].size,
+ &transfer_ring_bufs[ch_idx].phys_base,
+ GFP_KERNEL);
+ if (!transfer_ring_bufs[ch_idx].base) {
+ IPA_UT_ERR("no mem for xfer ring buf\n");
+ dma_free_coherent(ipa3_ctx->pdev,
+ event_ring_bufs[ev_ring_idx].size,
+ event_ring_bufs[ev_ring_idx].base,
+ event_ring_bufs[ev_ring_idx].phys_base);
+ event_ring_bufs[ev_ring_idx].base = NULL;
+ return -ENOMEM;
+ }
+
+ p_channels[ch_idx].erindex = event_ring_id;
+ p_channels[ch_idx].rbase = (u32)transfer_ring_bufs[ch_idx].phys_base;
+ p_channels[ch_idx].rlen = transfer_ring_bufs[ch_idx].size;
+ p_channels[ch_idx].rp = (u32)transfer_ring_bufs[ch_idx].phys_base;
+ p_channels[ch_idx].wp = (u32)transfer_ring_bufs[ch_idx].phys_base;
+ p_channels[ch_idx].chtype = ch_type;
+ p_channels[ch_idx].brsmode = IPA_MHI_BURST_MODE_DEFAULT;
+ p_channels[ch_idx].pollcfg = 0;
+
+ return 0;
+}
+
+static void ipa_mhi_test_destroy_data_structures(void)
+{
+ IPA_UT_DBG("Entry\n");
+
+ /* Destroy OUT data buffer */
+ if (test_mhi_ctx->out_buffer.base) {
+ dma_free_coherent(ipa3_ctx->pdev,
+ test_mhi_ctx->out_buffer.size,
+ test_mhi_ctx->out_buffer.base,
+ test_mhi_ctx->out_buffer.phys_base);
+ test_mhi_ctx->out_buffer.base = NULL;
+ }
+
+ /* Destroy IN data buffer */
+ if (test_mhi_ctx->in_buffer.base) {
+ dma_free_coherent(ipa3_ctx->pdev,
+ test_mhi_ctx->in_buffer.size,
+ test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->in_buffer.phys_base);
+ test_mhi_ctx->in_buffer.base = NULL;
+ }
+
+ /* Destroy IN channel ctx */
+ ipa_mhi_test_destroy_channel_context(
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1);
+
+ /* Destroy OUT channel ctx */
+ ipa_mhi_test_destroy_channel_context(
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID);
+}
+
+static int ipa_mhi_test_setup_data_structures(void)
+{
+ int rc = 0;
+
+ IPA_UT_DBG("Entry\n");
+
+ /* Config OUT Channel Context */
+ rc = ipa_mhi_test_config_channel_context(
+ &test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID,
+ 0x100,
+ 0x80,
+ IPA_MHI_OUT_CHAHNNEL);
+ if (rc) {
+ IPA_UT_ERR("Fail to config OUT ch ctx - err %d", rc);
+ return rc;
+ }
+
+ /* Config IN Channel Context */
+ rc = ipa_mhi_test_config_channel_context(
+ &test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1,
+ 0x100,
+ 0x80,
+ IPA_MHI_IN_CHAHNNEL);
+ if (rc) {
+ IPA_UT_ERR("Fail to config IN ch ctx - err %d", rc);
+ goto fail_destroy_out_ch_ctx;
+ }
+
+ /* allocate IN data buffer */
+ test_mhi_ctx->in_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE;
+ test_mhi_ctx->in_buffer.base = dma_alloc_coherent(
+ ipa3_ctx->pdev, test_mhi_ctx->in_buffer.size,
+ &test_mhi_ctx->in_buffer.phys_base, GFP_KERNEL);
+ if (!test_mhi_ctx->in_buffer.base) {
+ IPA_UT_ERR("no mem for In data buffer\n");
+ rc = -ENOMEM;
+ goto fail_destroy_in_ch_ctx;
+ }
+ memset(test_mhi_ctx->in_buffer.base, 0,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+
+ /* allocate OUT data buffer */
+ test_mhi_ctx->out_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE;
+ test_mhi_ctx->out_buffer.base = dma_alloc_coherent(
+ ipa3_ctx->pdev, test_mhi_ctx->out_buffer.size,
+ &test_mhi_ctx->out_buffer.phys_base, GFP_KERNEL);
+ if (!test_mhi_ctx->out_buffer.base) {
+ IPA_UT_ERR("no mem for Out data buffer\n");
+ rc = -ENOMEM;
+ goto fail_destroy_in_data_buf;
+ }
+ memset(test_mhi_ctx->out_buffer.base, 0,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+
+ return 0;
+
+fail_destroy_in_data_buf:
+ dma_free_coherent(ipa3_ctx->pdev,
+ test_mhi_ctx->in_buffer.size,
+ test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->in_buffer.phys_base);
+ test_mhi_ctx->in_buffer.base = NULL;
+fail_destroy_in_ch_ctx:
+ ipa_mhi_test_destroy_channel_context(
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1);
+fail_destroy_out_ch_ctx:
+ ipa_mhi_test_destroy_channel_context(
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID);
+ return rc;
+}
+
+/**
+ * ipa_test_mhi_suite_setup() - Suite setup function
+ */
+static int ipa_test_mhi_suite_setup(void **ppriv)
+{
+ int rc = 0;
+
+ IPA_UT_DBG("Start Setup\n");
+
+ if (!gsi_ctx) {
+ IPA_UT_ERR("No GSI ctx\n");
+ return -EINVAL;
+ }
+
+ if (!ipa3_ctx) {
+ IPA_UT_ERR("No IPA ctx\n");
+ return -EINVAL;
+ }
+
+ test_mhi_ctx = kzalloc(sizeof(struct ipa_test_mhi_context),
+ GFP_KERNEL);
+ if (!test_mhi_ctx) {
+ IPA_UT_ERR("failed to allocate ctx\n");
+ return -ENOMEM;
+ }
+
+ test_mhi_ctx->gsi_mmio = ioremap_nocache(gsi_ctx->per.phys_addr,
+ gsi_ctx->per.size);
+ if (!test_mhi_ctx->gsi_mmio) {
+ IPA_UT_ERR("failed to remap GSI HW size=%lu\n",
+ gsi_ctx->per.size);
+ rc = -EFAULT;
+ goto fail_free_ctx;
+ }
+
+ rc = ipa_test_mhi_alloc_mmio_space();
+ if (rc) {
+ IPA_UT_ERR("failed to alloc mmio space");
+ goto fail_iounmap;
+ }
+
+ rc = ipa_mhi_test_setup_data_structures();
+ if (rc) {
+ IPA_UT_ERR("failed to setup data structures");
+ goto fail_free_mmio_spc;
+ }
+
+ *ppriv = test_mhi_ctx;
+ return 0;
+
+fail_free_mmio_spc:
+ ipa_test_mhi_free_mmio_space();
+fail_iounmap:
+ iounmap(test_mhi_ctx->gsi_mmio);
+fail_free_ctx:
+ kfree(test_mhi_ctx);
+ test_mhi_ctx = NULL;
+ return rc;
+}
+
+/**
+ * ipa_test_mhi_suite_teardown() - Suite teardown function
+ */
+static int ipa_test_mhi_suite_teardown(void *priv)
+{
+ IPA_UT_DBG("Start Teardown\n");
+
+ if (!test_mhi_ctx)
+ return 0;
+
+ ipa_mhi_test_destroy_data_structures();
+ ipa_test_mhi_free_mmio_space();
+ iounmap(test_mhi_ctx->gsi_mmio);
+ kfree(test_mhi_ctx);
+ test_mhi_ctx = NULL;
+
+ return 0;
+}
+
+/**
+ * ipa_mhi_test_initialize_driver() - MHI init and possibly start and connect
+ *
+ * To be run during tests
+ * 1. MHI init (Ready state)
+ * 2. Conditional MHI start and connect (M0 state)
+ */
+static int ipa_mhi_test_initialize_driver(bool skip_start_and_conn)
+{
+ int rc = 0;
+ struct ipa_mhi_init_params init_params;
+ struct ipa_mhi_start_params start_params;
+ struct ipa_mhi_connect_params prod_params;
+ struct ipa_mhi_connect_params cons_params;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ bool is_dma;
+ u64 phys_addr;
+
+ IPA_UT_LOG("Entry\n");
+
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+
+ /* start IPA MHI */
+ memset(&init_params, 0, sizeof(init_params));
+ init_params.msi.addr_low = test_mhi_ctx->msi.phys_base;
+ init_params.msi.data = 0x10000000;
+ init_params.msi.mask = ~0x10000000;
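+ /* the mask lets the msivec bits through, so the MSI value the test
+  * later polls for in the spare register is 0x10000000 OR'ed with
+  * the event ring id (see IPA_MHI_TEST_CHECK_MSI_INTR usage below)
+  */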
+ /* MMIO not needed for GSI */
+ init_params.first_ch_idx = IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ init_params.first_er_idx = IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+ init_params.assert_bit40 = false;
+ init_params.notify = ipa_mhi_test_cb;
+ init_params.priv = NULL;
+ init_params.test_mode = true;
+
+ rc = ipa_mhi_init(&init_params);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_init failed %d\n", rc);
+ return rc;
+ }
+
+ IPA_UT_LOG("Wait async ready event\n");
+ if (wait_for_completion_timeout(&mhi_test_ready_comp, 10 * HZ) == 0) {
+ IPA_UT_LOG("timeout waiting for READY event");
+ IPA_UT_TEST_FAIL_REPORT("failed waiting for state ready");
+ return -ETIME;
+ }
+
+ if (ipa_mhi_is_using_dma(&is_dma)) {
+ IPA_UT_LOG("is_dma checking failed. Is MHI loaded?\n");
+ IPA_UT_TEST_FAIL_REPORT("failed checking using dma");
+ return -EPERM;
+ }
+
+ if (is_dma) {
+ IPA_UT_LOG("init ipa_dma\n");
+ rc = ipa_dma_init();
+ if (rc && rc != -EFAULT) {
+ IPA_UT_LOG("ipa_dma_init failed, %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("failed init dma");
+ return rc;
+ }
+ IPA_UT_LOG("enable ipa_dma\n");
+ rc = ipa_dma_enable();
+ if (rc && rc != -EPERM) {
+ IPA_UT_LOG("ipa_dma_enable failed, %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("failed enable dma");
+ return rc;
+ }
+ }
+
+ if (!skip_start_and_conn) {
+ memset(&start_params, 0, sizeof(start_params));
+ start_params.channel_context_array_addr = p_mmio->ccabap;
+ start_params.event_context_array_addr = p_mmio->ecabap;
+
+ IPA_UT_LOG("BEFORE mhi_start\n");
+ rc = ipa_mhi_start(&start_params);
+ if (rc) {
+ IPA_UT_LOG("mhi_start failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail start mhi");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER mhi_start\n");
+
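+ /*
+  * translate the channel context physical address (ccabap + index *
+  * entry size) to a virtual pointer via the context array buffer's
+  * base/phys_base pair
+  */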
+ phys_addr = p_mmio->ccabap + (IPA_MHI_TEST_FIRST_CHANNEL_ID *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ p_ch_ctx_array, phys_addr,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+ memset(&prod_params, 0, sizeof(prod_params));
+ prod_params.sys.client = IPA_CLIENT_MHI_PROD;
+ prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA;
+ prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS;
+ prod_params.sys.ipa_ep_cfg.seq.seq_type =
+ IPA_MHI_TEST_SEQ_TYPE_DMA;
+ prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true;
+ prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ IPA_UT_LOG("BEFORE connect_pipe (PROD): client:%d ch_id:%u\n",
+ prod_params.sys.client, prod_params.channel_id);
+ rc = ipa_mhi_connect_pipe(&prod_params,
+ &test_mhi_ctx->prod_hdl);
+ if (rc) {
+ IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe");
+ return rc;
+ }
+
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("MHI_PROD: chstate is not RUN chstate:%s\n",
+ ipa_mhi_get_state_str(
+ p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run");
+ return -EFAULT;
+ }
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ p_ch_ctx_array, phys_addr,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+ memset(&cons_params, 0, sizeof(cons_params));
+ cons_params.sys.client = IPA_CLIENT_MHI_CONS;
+ cons_params.sys.skip_ep_cfg = true;
+ cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1;
+ IPA_UT_LOG("BEFORE connect_pipe (CONS): client:%d ch_id:%u\n",
+ cons_params.sys.client, cons_params.channel_id);
+ rc = ipa_mhi_connect_pipe(&cons_params,
+ &test_mhi_ctx->cons_hdl);
+ if (rc) {
+ IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe");
+ return rc;
+ }
+
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("MHI_CONS: chstate is not RUN chstate:%s\n",
+ ipa_mhi_get_state_str(
+ p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. MHI destroy
+ * 2. re-configure the channels
+ */
+static int ipa_mhi_test_destroy(struct ipa_test_mhi_context *ctx)
+{
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ u64 phys_addr;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ int rc;
+
+ IPA_UT_LOG("Entry\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("Input err invalid ctx\n");
+ return -EINVAL;
+ }
+
+ p_mmio = ctx->mmio_buf.base;
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = ctx->ch_ctx_array.base +
+ (phys_addr - ctx->ch_ctx_array.phys_base);
+ IPA_UT_LOG("channel id %d (CONS): chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = ctx->ch_ctx_array.base +
+ (phys_addr - ctx->ch_ctx_array.phys_base);
+ IPA_UT_LOG("channel id %d (PROD): chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+ IPA_UT_LOG("MHI Destroy\n");
+ ipa_mhi_destroy();
+ IPA_UT_LOG("Post MHI Destroy\n");
+
+ ctx->prod_hdl = 0;
+ ctx->cons_hdl = 0;
+
+ dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[1].size,
+ ctx->xfer_ring_bufs[1].base, ctx->xfer_ring_bufs[1].phys_base);
+ ctx->xfer_ring_bufs[1].base = NULL;
+
+ IPA_UT_LOG("config channel context for channel %d (MHI CONS)\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1);
+ rc = ipa_mhi_test_config_channel_context(
+ &ctx->mmio_buf,
+ ctx->xfer_ring_bufs,
+ ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1,
+ 0x100,
+ 0x80,
+ IPA_MHI_IN_CHAHNNEL);
+ if (rc) {
+ IPA_UT_LOG("config channel context failed %d, channel %d\n",
+ rc, IPA_MHI_TEST_FIRST_CHANNEL_ID + 1);
+ IPA_UT_TEST_FAIL_REPORT("fail config CONS channel ctx");
+ return -EFAULT;
+ }
+
+ dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[0].size,
+ ctx->xfer_ring_bufs[0].base, ctx->xfer_ring_bufs[0].phys_base);
+ ctx->xfer_ring_bufs[0].base = NULL;
+
+ IPA_UT_LOG("config channel context for channel %d (MHI PROD)\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID);
+ rc = ipa_mhi_test_config_channel_context(
+ &ctx->mmio_buf,
+ ctx->xfer_ring_bufs,
+ ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID,
+ 0x100,
+ 0x80,
+ IPA_MHI_OUT_CHAHNNEL);
+ if (rc) {
+ IPA_UT_LOG("config channel context failed %d, channel %d\n",
+ rc, IPA_MHI_TEST_FIRST_CHANNEL_ID);
+ IPA_UT_TEST_FAIL_REPORT("fail config PROD channel ctx");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. Destroy
+ * 2. Initialize (to Ready or M0 states)
+ */
+static int ipa_mhi_test_reset(struct ipa_test_mhi_context *ctx,
+ bool skip_start_and_conn)
+{
+ int rc;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy fail");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(skip_start_and_conn);
+ if (rc) {
+ IPA_UT_LOG("driver init failed skip_start_and_conn=%d rc=%d\n",
+ skip_start_and_conn, rc);
+ IPA_UT_TEST_FAIL_REPORT("init fail");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. disconnect cons channel
+ * 2. config cons channel
+ * 3. disconnect prod channel
+ * 4. config prod channel
+ * 5. connect prod
+ * 6. connect cons
+ */
+static int ipa_mhi_test_channel_reset(void)
+{
+ int rc;
+ struct ipa_mhi_connect_params prod_params;
+ struct ipa_mhi_connect_params cons_params;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ u64 phys_addr;
+
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+
+ IPA_UT_LOG("Before pipe disconnect (CONS) client hdl=%u\n",
+ test_mhi_ctx->cons_hdl);
+ rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->cons_hdl);
+ if (rc) {
+ IPA_UT_LOG("disconnect_pipe failed (CONS) %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("CONS pipe disconnect fail");
+ return -EFAULT;
+ }
+ test_mhi_ctx->cons_hdl = 0;
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+ IPA_UT_LOG("chstate is not disabled! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not disabled");
+ return -EFAULT;
+ }
+
+ dma_free_coherent(ipa3_ctx->pdev,
+ test_mhi_ctx->xfer_ring_bufs[1].size,
+ test_mhi_ctx->xfer_ring_bufs[1].base,
+ test_mhi_ctx->xfer_ring_bufs[1].phys_base);
+ test_mhi_ctx->xfer_ring_bufs[1].base = NULL;
+ rc = ipa_mhi_test_config_channel_context(
+ &test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1,
+ 0x100,
+ 0x80,
+ IPA_MHI_IN_CHAHNNEL);
+ if (rc) {
+ IPA_UT_LOG("config_channel_context IN failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail config CONS channel context");
+ return -EFAULT;
+ }
+ IPA_UT_LOG("Before pipe disconnect (PROD) client hdl=%u\n",
+ test_mhi_ctx->prod_hdl);
+ rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->prod_hdl);
+ if (rc) {
+ IPA_UT_LOG("disconnect_pipe failed (PROD) %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("PROD pipe disconnect fail");
+ return -EFAULT;
+ }
+ test_mhi_ctx->prod_hdl = 0;
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+ IPA_UT_LOG("chstate is not disabled! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not disabled");
+ return -EFAULT;
+ }
+
+ dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->xfer_ring_bufs[0].size,
+ test_mhi_ctx->xfer_ring_bufs[0].base,
+ test_mhi_ctx->xfer_ring_bufs[0].phys_base);
+ test_mhi_ctx->xfer_ring_bufs[0].base = NULL;
+ rc = ipa_mhi_test_config_channel_context(
+ &test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID,
+ 0x100,
+ 0x80,
+ IPA_MHI_OUT_CHAHNNEL);
+ if (rc) {
+ IPA_UT_LOG("config_channel_context OUT failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail config PROD channel context");
+ return -EFAULT;
+ }
+
+ memset(&prod_params, 0, sizeof(prod_params));
+ prod_params.sys.client = IPA_CLIENT_MHI_PROD;
+ prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA;
+ prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS;
+ prod_params.sys.ipa_ep_cfg.seq.seq_type = IPA_MHI_TEST_SEQ_TYPE_DMA;
+ prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true;
+ prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID;
+ IPA_UT_LOG("BEFORE connect PROD\n");
+ rc = ipa_mhi_connect_pipe(&prod_params, &test_mhi_ctx->prod_hdl);
+ if (rc) {
+ IPA_UT_LOG("connect_pipe failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe");
+ return rc;
+ }
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not run! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run");
+ return -EFAULT;
+ }
+
+ memset(&cons_params, 0, sizeof(cons_params));
+ cons_params.sys.client = IPA_CLIENT_MHI_CONS;
+ cons_params.sys.skip_ep_cfg = true;
+ cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1;
+ IPA_UT_LOG("BEFORE connect CONS\n");
+ rc = ipa_mhi_connect_pipe(&cons_params, &test_mhi_ctx->cons_hdl);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_connect_pipe failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe");
+ return rc;
+ }
+
+ phys_addr = p_mmio->ccabap +
+ ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not run! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * Send data
+ */
+static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio,
+ struct ipa_mem_buffer xfer_ring_bufs[],
+ struct ipa_mem_buffer ev_ring_bufs[],
+ u8 channel_id,
+ struct ipa_mem_buffer buf_array[],
+ int buf_array_size,
+ bool ieob,
+ bool ieot,
+ bool bei,
+ bool trigger_db)
+{
+ struct ipa_mhi_transfer_ring_element *curr_re;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_channels;
+ struct ipa_mhi_event_context_array *p_events;
+ u32 channel_idx;
+ u32 event_ring_index;
+ u32 wp_ofst;
+ u32 rp_ofst;
+ u32 next_wp_ofst;
+ int i;
+ u32 num_of_ed_to_queue;
+
+ IPA_UT_LOG("Entry\n");
+
+ p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base;
+ p_channels = (struct ipa_mhi_channel_context_array *)
+ ((unsigned long)p_mmio->crcbap);
+ p_events = (struct ipa_mhi_event_context_array *)
+ ((unsigned long)p_mmio->crdb);
+
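+ /* one event descriptor per buffer when IEOB is set; otherwise a
+  * single one for the whole chain when IEOT is set
+  */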
+ if (ieob)
+ num_of_ed_to_queue = buf_array_size;
+ else
+ num_of_ed_to_queue = ieot ? 1 : 0;
+
+ if (channel_id >=
+ (IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS) ||
+ channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) {
+ IPA_UT_LOG("Invalid Channel ID %d\n", channel_id);
+ return -EFAULT;
+ }
+
+ channel_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID;
+
+ if (!xfer_ring_bufs[channel_idx].base) {
+ IPA_UT_LOG("Channel is not allocated\n");
+ return -EFAULT;
+ }
+ if (p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_DEFAULT ||
+ p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_ENABLE)
+ num_of_ed_to_queue += 1; /* for OOB/DB mode event */
+
+ /* First queue EDs */
+ event_ring_index = p_channels[channel_idx].erindex -
+ IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+
+ wp_ofst = (u32)(p_events[event_ring_index].wp -
+ p_events[event_ring_index].rbase);
+
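+ /* ring offsets below are computed in u32, so reject rings whose
+  * rlen does not fit in 32 bits
+  */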
+ if (p_events[event_ring_index].rlen & 0xFFFFFFFF00000000) {
+ IPA_UT_LOG("invalid ev rlen %llu\n",
+ p_events[event_ring_index].rlen);
+ return -EFAULT;
+ }
+
+ next_wp_ofst = (wp_ofst + num_of_ed_to_queue *
+ sizeof(struct ipa_mhi_event_ring_element)) %
+ (u32)p_events[event_ring_index].rlen;
+
+ /* set next WP */
+ p_events[event_ring_index].wp =
+ (u32)p_events[event_ring_index].rbase + next_wp_ofst;
+
+ /* write value to event ring doorbell */
+ IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
+ p_events[event_ring_index].wp,
+ &(gsi_ctx->per.phys_addr), GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+ event_ring_index + IPA_MHI_GSI_ER_START, 0));
+ iowrite32(p_events[event_ring_index].wp,
+ test_mhi_ctx->gsi_mmio +
+ GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+ event_ring_index + IPA_MHI_GSI_ER_START, 0));
+
+ for (i = 0; i < buf_array_size; i++) {
+ /* calculate virtual pointer for current WP and RP */
+ wp_ofst = (u32)(p_channels[channel_idx].wp -
+ p_channels[channel_idx].rbase);
+ rp_ofst = (u32)(p_channels[channel_idx].rp -
+ p_channels[channel_idx].rbase);
+ (void)rp_ofst;
+ curr_re = (struct ipa_mhi_transfer_ring_element *)
+ ((unsigned long)xfer_ring_bufs[channel_idx].base +
+ wp_ofst);
+ if (p_channels[channel_idx].rlen & 0xFFFFFFFF00000000) {
+ IPA_UT_LOG("invalid ch rlen %llu\n",
+ p_channels[channel_idx].rlen);
+ return -EFAULT;
+ }
+ next_wp_ofst = (wp_ofst +
+ sizeof(struct ipa_mhi_transfer_ring_element)) %
+ (u32)p_channels[channel_idx].rlen;
+
+ /* write current RE */
+ curr_re->type = IPA_MHI_RING_ELEMENT_TRANSFER;
+ curr_re->len = (u16)buf_array[i].size;
+ curr_re->ptr = (u32)buf_array[i].phys_base;
+ curr_re->word_C.bits.bei = bei;
+ curr_re->word_C.bits.ieob = ieob;
+ curr_re->word_C.bits.ieot = ieot;
+
+ /* set next WP */
+ p_channels[channel_idx].wp =
+ p_channels[channel_idx].rbase + next_wp_ofst;
+
+ if (i == (buf_array_size - 1)) {
+ /* last buffer */
+ curr_re->word_C.bits.chain = 0;
+ if (trigger_db) {
+ IPA_UT_LOG(
+ "DB to channel 0x%llx: base %pa ofst 0x%x\n"
+ , p_channels[channel_idx].wp
+ , &(gsi_ctx->per.phys_addr)
+ , GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(
+ channel_idx, 0));
+ iowrite32(p_channels[channel_idx].wp,
+ test_mhi_ctx->gsi_mmio +
+ GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(
+ channel_idx, 0));
+ }
+ } else {
+ curr_re->word_C.bits.chain = 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * Send data in loopback (from In to OUT) and compare
+ */
+static int ipa_mhi_test_loopback_data_transfer(void)
+{
+ struct ipa_mem_buffer *p_mmio;
+ int i;
+ int rc;
+ static int val;
+ bool timeout = true;
+
+ IPA_UT_LOG("Entry\n");
+
+ p_mmio = &test_mhi_ctx->mmio_buf;
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
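+ /* static val makes the payload pattern unique per invocation so a
+  * stale buffer from a previous run cannot pass the memcmp below
+  */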
+ val++;
+
+ memset(test_mhi_ctx->in_buffer.base, 0,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++)
+ memset(test_mhi_ctx->out_buffer.base + i, (val + i) & 0xFF, 1);
+
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(p_mmio,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+ return rc;
+ }
+
+ /* queue REs for OUT side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(p_mmio,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ &test_mhi_ctx->out_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re");
+ return rc;
+ }
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (timeout) {
+ IPA_UT_LOG("transfer timeout. MSI = 0x%x\n",
+ *((u32 *)test_mhi_ctx->msi.base));
+ IPA_UT_TEST_FAIL_REPORT("xfer timeout");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base, test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("buffers are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * Do suspend and check channel states to be suspend if should success
+ */
+static int ipa_mhi_test_suspend(bool force, bool should_success)
+{
+ int rc;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ u64 phys_addr;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_suspend(force);
+ if (should_success && rc != 0) {
+ IPA_UT_LOG("ipa_mhi_suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend failed");
+ return -EFAULT;
+ }
+
+ if (!should_success && rc != -EAGAIN) {
+ IPA_UT_LOG("ipa_mhi_suspend did not return -EAGAIN, fail %d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend succeeded unexpectedly");
+ return -EFAULT;
+ }
+
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (should_success) {
+ if (p_ch_ctx_array->chstate !=
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND) {
+ IPA_UT_LOG("chstate is not suspend. ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not suspend");
+ return -EFAULT;
+ }
+ if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) {
+ IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ p_ch_ctx_array->rp, p_ch_ctx_array->wp);
+ IPA_UT_TEST_FAIL_REPORT("rp was not updated");
+ return -EFAULT;
+ }
+ } else {
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not run");
+ return -EFAULT;
+ }
+ }
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (should_success) {
+ if (p_ch_ctx_array->chstate !=
+ IPA_HW_MHI_CHANNEL_STATE_SUSPEND) {
+ IPA_UT_LOG("chstate is not suspend! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not suspend");
+ return -EFAULT;
+ }
+ if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) {
+ IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ p_ch_ctx_array->rp, p_ch_ctx_array->wp);
+ IPA_UT_TEST_FAIL_REPORT("rp was not updated");
+ return -EFAULT;
+ }
+ } else {
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not run");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * Do resume and verify the channel states are RUN
+ */
+static int ipa_test_mhi_resume(void)
+{
+ int rc;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ u64 phys_addr;
+
+ rc = ipa_mhi_resume();
+ if (rc) {
+ IPA_UT_LOG("resume failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("resume failed");
+ return -EFAULT;
+ }
+
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not run");
+ return -EFAULT;
+ }
+
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+ IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+ IPA_UT_TEST_FAIL_REPORT("channel state not run");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. suspend
+ * 2. queue RE for IN and OUT and send data
+ * 3. should get MSI timeout due to suspend
+ * 4. resume
+ * 5. should get the MSIs now
+ * 6. compare the IN and OUT buffers
+ */
+static int ipa_mhi_test_suspend_resume(void)
+{
+ int rc;
+ int i;
+ bool timeout = true;
+
+ IPA_UT_LOG("Entry\n");
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(false, true);
+ if (rc) {
+ IPA_UT_LOG("suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend failed");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++)
+ memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+ return rc;
+ }
+
+ /* queue REs for OUT side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ &test_mhi_ctx->out_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re");
+ return rc;
+ }
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (!timeout) {
+ IPA_UT_LOG("Error: transfer success on suspend\n");
+ IPA_UT_TEST_FAIL_REPORT("xfer succeeded unexpectedly");
+ return -EFAULT;
+ }
+
+ IPA_UT_LOG("BEFORE resume\n");
+ rc = ipa_test_mhi_resume();
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_resume failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("resume fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER resume\n");
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (timeout) {
+ IPA_UT_LOG("Error: transfer timeout\n");
+ IPA_UT_TEST_FAIL_REPORT("xfer timeout");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("Error: buffers are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. enable aggregation
+ * 2. queue IN RE (ring element)
+ * 3. allocate skb with data
+ * 4. send it (this will create open aggr frame)
+ */
+static int ipa_mhi_test_create_aggr_open_frame(void)
+{
+ struct ipa_ep_cfg_aggr ep_aggr;
+ struct sk_buff *skb;
+ int rc;
+ int i;
+ u32 aggr_state_active;
+
+ IPA_UT_LOG("Entry\n");
+
+ memset(&ep_aggr, 0, sizeof(ep_aggr));
+ ep_aggr.aggr_en = IPA_ENABLE_AGGR;
+ ep_aggr.aggr = IPA_GENERIC;
+ ep_aggr.aggr_pkt_limit = 2;
+
+ rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr);
+ if (rc) {
+ IPA_UT_LOG("failed to configure aggr");
+ IPA_UT_TEST_FAIL_REPORT("failed to configure aggr");
+ return rc;
+ }
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+ return rc;
+ }
+
+ skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ if (!skb) {
+ IPA_UT_LOG("no mem for skb\n");
+ IPA_UT_TEST_FAIL_REPORT("fail alloc skb");
+ return -ENOMEM;
+ }
+ skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) {
+ memset(skb->data + i, i & 0xFF, 1);
+ memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+ }
+
+ rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+ if (rc) {
+ IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
+ return rc;
+ }
+
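+ /* give the HW a moment to process the skb before polling the
+  * aggregation state register
+  */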
+ msleep(20);
+
+ aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+ IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active);
+ if (aggr_state_active == 0) {
+ IPA_UT_LOG("No aggregation frame open!\n");
+ IPA_UT_TEST_FAIL_REPORT("No aggregation frame open");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. create open aggr by sending data
+ * 2. suspend - if force it should succeed, otherwise it fails
+ * 3. if force - wait for wakeup event - it should arrive
+ * 4. if force - resume
+ * 5. force close the aggr.
+ * 6. wait for MSI - it should arrive
+ * 7. compare IN and OUT buffers
+ * 8. disable aggr.
+ */
+static int ipa_mhi_test_suspend_aggr_open(bool force)
+{
+ int rc;
+ struct ipa_ep_cfg_aggr ep_aggr;
+ bool timeout = true;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_test_create_aggr_open_frame();
+ if (rc) {
+ IPA_UT_LOG("failed create open aggr\n");
+ IPA_UT_TEST_FAIL_REPORT("fail create open aggr");
+ return rc;
+ }
+
+ if (force)
+ reinit_completion(&mhi_test_wakeup_comp);
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ /**
+ * if suspend is forced, it should succeed.
+ * otherwise it should fail due to the open aggr frame.
+ */
+ rc = ipa_mhi_test_suspend(force, force);
+ if (rc) {
+ IPA_UT_LOG("suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ if (force) {
+ if (!wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ)) {
+ IPA_UT_LOG("timeout waiting for wakeup event\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout waiting for wakeup event");
+ return -ETIME;
+ }
+
+ IPA_UT_LOG("BEFORE resume\n");
+ rc = ipa_test_mhi_resume();
+ if (rc) {
+ IPA_UT_LOG("resume failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("resume failed");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER resume\n");
+ }
+
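+ /* force close the open aggregation frame on the CONS pipe so the
+  * pending IN transfer can complete
+  */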
+ ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << test_mhi_ctx->cons_hdl));
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout);
+ if (timeout) {
+ IPA_UT_LOG("fail: transfer not completed\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout on transferring data");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("fail: buffers are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ memset(&ep_aggr, 0, sizeof(ep_aggr));
+ rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr);
+ if (rc) {
+ IPA_UT_LOG("failed to configure aggr");
+ IPA_UT_TEST_FAIL_REPORT("fail to disable aggr");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. suspend
+ * 2. queue IN RE (ring element)
+ * 3. allocate skb with data
+ * 4. send it (this will create open aggr frame)
+ * 5. wait for wakeup event - it should arrive
+ * 6. resume
+ * 7. wait for MSI - it should arrive
+ * 8. compare IN and OUT buffers
+ */
+static int ipa_mhi_test_suspend_host_wakeup(void)
+{
+ int rc;
+ int i;
+ bool timeout = true;
+ struct sk_buff *skb;
+
+ reinit_completion(&mhi_test_wakeup_comp);
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(false, true);
+ if (rc) {
+ IPA_UT_LOG("suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ /* queue RE for IN side and trigger doorbell*/
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+ return rc;
+ }
+
+ skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ if (!skb) {
+ IPA_UT_LOG("no mem for skb\n");
+ IPA_UT_TEST_FAIL_REPORT("no mem for skb");
+ return -ENOMEM;
+ }
+ skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) {
+ memset(skb->data + i, i & 0xFF, 1);
+ memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+ }
+
+ rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+ if (rc) {
+ IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
+ return rc;
+ }
+
+ if (wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ) == 0) {
+ IPA_UT_LOG("timeout waiting for wakeup event\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout waiting for wakeup event");
+ return -ETIME;
+ }
+
+ IPA_UT_LOG("BEFORE resume\n");
+ rc = ipa_test_mhi_resume();
+ if (rc) {
+ IPA_UT_LOG("resume failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("resume fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER resume\n");
+
+ /* check for MSI interrupt on the channels */
+ IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout);
+ if (timeout) {
+ IPA_UT_LOG("fail: transfer timeout\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout on xfer");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("fail: buffers are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. queue OUT RE/buffer
+ * 2. wait for MSI on OUT
+ * 3. Repeat 1 and 2 until the MSI wait times out (ch full / holb)
+ */
+static int ipa_mhi_test_create_full_channel(int *submitted_packets)
+{
+ int i;
+ bool timeout = true;
+ int rc;
+
+ if (!submitted_packets) {
+ IPA_UT_LOG("Input error\n");
+ return -EINVAL;
+ }
+
+ *submitted_packets = 0;
+
+ for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++)
+ memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+
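+ /* keep queuing single-buffer REs until an MSI stops arriving within
+  * the poll window - at that point the channel is full
+  */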
+ do {
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ IPA_UT_LOG("submitting OUT buffer\n");
+ timeout = true;
+ /* queue REs for OUT side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID,
+ &test_mhi_ctx->out_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("fail OUT q re");
+ return rc;
+ }
+ (*submitted_packets)++;
+
+ IPA_UT_LOG("waiting for MSI\n");
+ for (i = 0; i < 10; i++) {
+ if (*((u32 *)test_mhi_ctx->msi.base) ==
+ (0x10000000 |
+ (IPA_MHI_TEST_FIRST_EVENT_RING_ID))) {
+ IPA_UT_LOG("got MSI\n");
+ timeout = false;
+ break;
+ }
+ msleep(20);
+ }
+ } while (!timeout);
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. queue OUT RE/buffer
+ * 2. wait for MSI on OUT
+ * 3. Repeat 1 and 2 until the MSI wait times out (ch full)
+ * 4. suspend - it should fail with -EAGAIN - M1 is rejected
+ * 5. for each submitted pkt, do the next steps
+ * 6. queue IN RE/buffer
+ * 7. wait for MSI
+ * 8. compare IN and OUT buffers
+ */
+static int ipa_mhi_test_suspend_full_channel(bool force)
+{
+ int rc;
+ bool timeout;
+ int submitted_packets = 0;
+
+ rc = ipa_mhi_test_create_full_channel(&submitted_packets);
+ if (rc) {
+ IPA_UT_LOG("fail create full channel\n");
+ IPA_UT_TEST_FAIL_REPORT("fail create full channel");
+ return rc;
+ }
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(force, false);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_suspend did not return -EAGAIN. rc %d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("test suspend fail");
+ return -EFAULT;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ while (submitted_packets) {
+ memset(test_mhi_ctx->in_buffer.base, 0,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+ timeout = true;
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q re");
+ return rc;
+ }
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (timeout) {
+ IPA_UT_LOG("transfer failed - timeout\n");
+ IPA_UT_TEST_FAIL_REPORT("timeout on xfer");
+ return -EFAULT;
+ }
+
+ /* compare the two buffers */
+ if (memcmp(test_mhi_ctx->in_buffer.base,
+ test_mhi_ctx->out_buffer.base,
+ IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+ IPA_UT_LOG("buffers are not equal\n");
+ IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+ return -EFAULT;
+ }
+
+ submitted_packets--;
+ }
+
+ return 0;
+}
+
+/**
+ * To be called from test
+ * 1. suspend
+ * 2. reset to M0 state
+ */
+static int ipa_mhi_test_suspend_and_reset(struct ipa_test_mhi_context *ctx)
+{
+ int rc;
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(false, true);
+ if (rc) {
+ IPA_UT_LOG("suspend failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend fail");
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ rc = ipa_mhi_test_reset(ctx, false);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset fail");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. manually update wp
+ * 2. suspend - should be rejected (-EAGAIN) due to the pending wp update
+ * 3. restore wp value
+ */
+static int ipa_mhi_test_suspend_wp_update(void)
+{
+ int rc;
+ struct ipa_mhi_mmio_register_set *p_mmio;
+ struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+ u64 old_wp;
+ u64 phys_addr;
+
+ /* simulate a write by updating the wp */
+ p_mmio = test_mhi_ctx->mmio_buf.base;
+ phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+ sizeof(struct ipa_mhi_channel_context_array));
+ p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+ (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+ old_wp = p_ch_ctx_array->wp;
+ p_ch_ctx_array->wp += 16;
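+ /* with wp != rp the suspend logic sees pending work, so the
+  * non-forced suspend below is expected to be rejected with -EAGAIN
+  */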
+
+ IPA_UT_LOG("BEFORE suspend\n");
+ rc = ipa_mhi_test_suspend(false, false);
+ if (rc) {
+ IPA_UT_LOG("suspend failed rc %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend fail");
+ p_ch_ctx_array->wp = old_wp;
+ return rc;
+ }
+ IPA_UT_LOG("AFTER suspend\n");
+
+ p_ch_ctx_array->wp = old_wp;
+
+ return 0;
+}
+
+/**
+ * To be run during test
+ * 1. create open aggr by sending data
+ * 2. channel reset (disconnect/connect)
+ * 3. validate no aggr. open after reset
+ * 4. disable aggr.
+ */
+static int ipa_mhi_test_channel_reset_aggr_open(void)
+{
+ int rc;
+ u32 aggr_state_active;
+ struct ipa_ep_cfg_aggr ep_aggr;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_test_create_aggr_open_frame();
+ if (rc) {
+ IPA_UT_LOG("failed create open aggr rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail create open aggr frame");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_channel_reset();
+ if (rc) {
+ IPA_UT_LOG("channel reset failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("channel reset fail");
+ return rc;
+ }
+
+ aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+ IPADBG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active);
+ if (aggr_state_active != 0) {
+ IPA_UT_LOG("aggregation frame open after reset!\n");
+ IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active);
+ IPA_UT_TEST_FAIL_REPORT("open aggr after reset");
+ return -EFAULT;
+ }
+
+ memset(&ep_aggr, 0, sizeof(ep_aggr));
+ rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr);
+ if (rc) {
+ IPA_UT_LOG("failed to configure aggr");
+ IPA_UT_TEST_FAIL_REPORT("fail to disable aggr");
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * To be run during test
+ * 1. queue OUT RE/buffer
+ * 2. wait for MSI on OUT
+ * 3. Repeat 1 and 2 until the MSI wait times out (ch full)
+ * 4. channel reset
+ * disconnect and reconnect the prod and cons
+ * 5. queue IN RE/buffer and ring DB
+ * 6. wait for MSI - should get timeout as channels were reset
+ * 7. reset again
+ */
+static int ipa_mhi_test_channel_reset_ipa_holb(void)
+{
+ int rc;
+ int submitted_packets = 0;
+ bool timeout;
+
+ IPA_UT_LOG("Entry\n");
+
+ rc = ipa_mhi_test_create_full_channel(&submitted_packets);
+ if (rc) {
+ IPA_UT_LOG("fail create full channel rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail create full channel");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_channel_reset();
+ if (rc) {
+ IPA_UT_LOG("channel reset failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("channel reset fail");
+ return rc;
+ }
+
+ /* invalidate spare register value (for msi) */
+ memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+ timeout = true;
+ /* queue RE for IN side and trigger doorbell */
+ rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+ test_mhi_ctx->xfer_ring_bufs,
+ test_mhi_ctx->ev_ring_bufs,
+ IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+ &test_mhi_ctx->in_buffer,
+ 1,
+ true,
+ true,
+ false,
+ true);
+
+ if (rc) {
+ IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail IN q re");
+ return rc;
+ }
+ submitted_packets--;
+
+ IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+ if (!timeout) {
+ IPA_UT_LOG("transfer succeeded although we had reset\n");
+ IPA_UT_TEST_FAIL_REPORT("xfer succeeded although we had reset");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_channel_reset();
+ if (rc) {
+ IPA_UT_LOG("channel reset failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("channel reset fail");
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * TEST: mhi reset in READY state
+ * 1. init to ready state (without start and connect)
+ * 2. reset (destroy and re-init)
+ * 3. destroy
+ */
+static int ipa_mhi_test_reset_ready_state(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(true);
+ if (rc) {
+ IPA_UT_LOG("init to Ready state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail to init to ready state");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_reset(ctx, true);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi reset in M0 state
+ * 1. init to M0 state (with start and connect)
+ * 2. reset (destroy and re-init)
+ * 3. destroy
+ */
+static int ipa_mhi_test_reset_m0_state(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT
+ ("fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_reset(ctx, false);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi in-loop reset in M0 state
+ * 1. init to M0 state (with start and connect)
+ * 2. reset (destroy and re-init) in-loop
+ * 3. destroy
+ */
+static int ipa_mhi_test_inloop_reset_m0_state(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT
+ ("fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_reset, rc, ctx, false);
+ if (rc) {
+ IPA_UT_LOG("in-loop reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "reset (destroy/re-init) in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data with reset
+ * 1. init to M0 state (with start and connect)
+ * 2. reset (destroy and re-init)
+ * 3. loopback data
+ * 4. reset (destroy and re-init)
+ * 5. loopback data again
+ * 6. destroy
+ */
+static int ipa_mhi_test_loopback_data_with_reset(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_reset(ctx, false);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_reset(ctx, false);
+ if (rc) {
+ IPA_UT_LOG("reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi reset in suspend state
+ * 1. init to M0 state (with start and connect)
+ * 2. suspend
+ * 3. reset (destroy and re-init)
+ * 4. destroy
+ */
+static int ipa_mhi_test_reset_on_suspend(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_suspend_and_reset(ctx);
+ if (rc) {
+ IPA_UT_LOG("suspend and reset failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("suspend and then reset failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi in-loop reset in suspend state
+ * 1. init to M0 state (with start and connect)
+ * 2. suspend
+ * 3. reset (destroy and re-init)
+ * 4. Do 2 and 3 in loop
+ * 5. destroy
+ */
+static int ipa_mhi_test_inloop_reset_on_suspend(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_and_reset, rc, ctx);
+ if (rc) {
+ IPA_UT_LOG("in-loop reset in suspend failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail in-loop reset while suspended");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data with reset on suspend
+ * 1. init to M0 state (with start and connect)
+ * 2. suspend
+ * 3. reset (destroy and re-init)
+ * 4. loopback data
+ * 5. suspend
+ * 6. reset (destroy and re-init)
+ * 7. destroy
+ */
+static int ipa_mhi_test_loopback_data_with_reset_on_suspend(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_suspend_and_reset(ctx);
+ if (rc) {
+ IPA_UT_LOG("suspend and reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_suspend_and_reset(ctx);
+ if (rc) {
+ IPA_UT_LOG("suspend and reset failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend/resume
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop suspend/resume
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_resume(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_resume, rc);
+ if (rc) {
+ IPA_UT_LOG("suspend resume failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT("in loop suspend/resume failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend/resume with aggr open
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop suspend/resume with open aggr.
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_resume_aggr_open(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open,
+ rc, false);
+ if (rc) {
+ IPA_UT_LOG("suspend resume with aggr open failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop suspend/resume with open aggr failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop force suspend/resume with aggr open
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop force suspend/resume with open aggr.
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_force_suspend_resume_aggr_open(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open,
+ rc, true);
+ if (rc) {
+ IPA_UT_LOG("force suspend resume with aggr open failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop force suspend/resume with open aggr failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend/host wakeup resume
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop suspend/resume with host wakeup
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_host_wakeup(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_host_wakeup, rc);
+ if (rc) {
+ IPA_UT_LOG("suspend host wakeup resume failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+			"in loop suspend/resume with host wakeup failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop rejected suspend due to full channel
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop rejected suspend
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_reject_suspend_full_channel(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel,
+ rc, false);
+ if (rc) {
+ IPA_UT_LOG("full channel rejected suspend failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop rejected suspend due to full channel failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop rejected force suspend due to
+ * full channel
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop rejected force suspend
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_reject_force_suspend_full_channel(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel,
+ rc, true);
+ if (rc) {
+ IPA_UT_LOG("full channel rejected force suspend failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop force rejected suspend as full ch failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend after wp manual update
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop suspend after wp update
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_resume_wp_update(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_wp_update, rc);
+ if (rc) {
+ IPA_UT_LOG("suspend after wp update failed rc=%d", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop suspend after wp update failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop channel reset (disconnect/connect)
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop channel reset (disconnect/connect)
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_channel_reset(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset, rc);
+ if (rc) {
+ IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("in loop channel reset failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop channel reset with open aggr
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop channel reset (disconnect/connect) with open aggr
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_channel_reset_aggr_open(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_aggr_open, rc);
+ if (rc) {
+ IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop channel reset with open aggr failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop channel reset with channel in HOLB
+ * 1. init to M0 state (with start and connect)
+ * 2. in loop channel reset (disconnect/connect) with channel in HOLB
+ * 3. loopback data
+ * 4. destroy
+ */
+static int ipa_mhi_test_in_loop_channel_reset_ipa_holb(void *priv)
+{
+ int rc;
+ struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+ IPA_UT_LOG("Test Start\n");
+
+ if (unlikely(!ctx)) {
+ IPA_UT_LOG("No context");
+ return -EFAULT;
+ }
+
+ rc = ipa_mhi_test_initialize_driver(false);
+ if (rc) {
+ IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "fail to init to M0 state (w/ start and connect)");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_ipa_holb, rc);
+ if (rc) {
+ IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "in loop channel reset with channel HOLB failed");
+ return rc;
+ }
+
+ IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+ if (rc) {
+ IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+ return rc;
+ }
+
+ rc = ipa_mhi_test_destroy(ctx);
+ if (rc) {
+ IPA_UT_LOG("destroy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("destroy failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(mhi, "MHI for GSI",
+ ipa_test_mhi_suite_setup, ipa_test_mhi_suite_teardown)
+{
+ IPA_UT_ADD_TEST(reset_ready_state,
+ "reset test in Ready state",
+ ipa_mhi_test_reset_ready_state,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(reset_m0_state,
+ "reset test in M0 state",
+ ipa_mhi_test_reset_m0_state,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(inloop_reset_m0_state,
+ "several reset iterations in M0 state",
+ ipa_mhi_test_inloop_reset_m0_state,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(loopback_data_with_reset_on_m0,
+ "reset before and after loopback data in M0 state",
+ ipa_mhi_test_loopback_data_with_reset,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(reset_on_suspend,
+ "reset test in suspend state",
+ ipa_mhi_test_reset_on_suspend,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(inloop_reset_on_suspend,
+ "several reset iterations in suspend state",
+ ipa_mhi_test_inloop_reset_on_suspend,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(loopback_data_with_reset_on_suspend,
+ "reset before and after loopback data in suspend state",
+ ipa_mhi_test_loopback_data_with_reset_on_suspend,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(suspend_resume,
+ "several suspend/resume iterations",
+ ipa_mhi_test_in_loop_suspend_resume,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(suspend_resume_with_open_aggr,
+ "several suspend/resume iterations with open aggregation frame",
+ ipa_mhi_test_in_loop_suspend_resume_aggr_open,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(force_suspend_resume_with_open_aggr,
+ "several force suspend/resume iterations with open aggregation frame",
+ ipa_mhi_test_in_loop_force_suspend_resume_aggr_open,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(suspend_resume_with_host_wakeup,
+ "several suspend and host wakeup resume iterations",
+ ipa_mhi_test_in_loop_suspend_host_wakeup,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(reject_suspend_channel_full,
+ "several rejected suspend iterations due to full channel",
+ ipa_mhi_test_in_loop_reject_suspend_full_channel,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(reject_force_suspend_channel_full,
+ "several rejected force suspend iterations due to full channel",
+ ipa_mhi_test_in_loop_reject_force_suspend_full_channel,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(suspend_resume_manual_wp_update,
+		"several suspend/resume iterations after simulating writing via manual wp update",
+ ipa_mhi_test_in_loop_suspend_resume_wp_update,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(channel_reset,
+ "several channel reset (disconnect/connect) iterations",
+ ipa_mhi_test_in_loop_channel_reset,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(channel_reset_aggr_open,
+ "several channel reset (disconnect/connect) iterations with open aggregation frame",
+ ipa_mhi_test_in_loop_channel_reset_aggr_open,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(channel_reset_ipa_holb,
+ "several channel reset (disconnect/connect) iterations with channel in HOLB state",
+ ipa_mhi_test_in_loop_channel_reset_ipa_holb,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+} IPA_UT_DEFINE_SUITE_END(mhi);
+
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
new file mode 100644
index 0000000..3bf9ac1
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -0,0 +1,1017 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "ipa_ut_framework.h"
+#include "ipa_ut_suite_list.h"
+#include "ipa_ut_i.h"
+
+
+#define IPA_UT_DEBUG_WRITE_BUF_SIZE 256
+#define IPA_UT_DEBUG_READ_BUF_SIZE 1024
+
+#define IPA_UT_READ_WRITE_DBG_FILE_MODE \
+ (S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR | S_IWGRP)
+
+/**
+ * struct ipa_ut_context - unit-test framework context
+ * @inited: Framework was initialized (IPA ready; enable file created)
+ * @enabled: Framework is enabled - tests and suite debugfs files created
+ * @lock: Lock for mutual exclusion
+ * @ipa_dbgfs_root: IPA root debugfs folder
+ * @test_dbgfs_root: UT root debugfs folder. Sub-folder of IPA root
+ * @test_dbgfs_suites: Suites root debugfs folder. Sub-folder of UT root
+ */
+struct ipa_ut_context {
+ bool inited;
+ bool enabled;
+ struct mutex lock;
+ struct dentry *ipa_dbgfs_root;
+ struct dentry *test_dbgfs_root;
+ struct dentry *test_dbgfs_suites;
+};
+
+static ssize_t ipa_ut_dbgfs_enable_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ipa_ut_dbgfs_enable_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+static ssize_t ipa_ut_dbgfs_test_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ipa_ut_dbgfs_test_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+static int ipa_ut_dbgfs_all_test_open(struct inode *inode,
+ struct file *filp);
+static int ipa_ut_dbgfs_regression_test_open(struct inode *inode,
+ struct file *filp);
+static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+
+
+static const struct file_operations ipa_ut_dbgfs_enable_fops = {
+ .read = ipa_ut_dbgfs_enable_read,
+ .write = ipa_ut_dbgfs_enable_write,
+};
+static const struct file_operations ipa_ut_dbgfs_test_fops = {
+ .read = ipa_ut_dbgfs_test_read,
+ .write = ipa_ut_dbgfs_test_write,
+};
+static const struct file_operations ipa_ut_dbgfs_all_test_fops = {
+ .open = ipa_ut_dbgfs_all_test_open,
+ .read = ipa_ut_dbgfs_meta_test_read,
+ .write = ipa_ut_dbgfs_meta_test_write,
+};
+static const struct file_operations ipa_ut_dbgfs_regression_test_fops = {
+ .open = ipa_ut_dbgfs_regression_test_open,
+ .read = ipa_ut_dbgfs_meta_test_read,
+ .write = ipa_ut_dbgfs_meta_test_write,
+};
+
+static struct ipa_ut_context *ipa_ut_ctx;
+char *_IPA_UT_TEST_LOG_BUF_NAME;
+struct ipa_ut_tst_fail_report
+ _IPA_UT_TEST_FAIL_REPORT_DATA[_IPA_UT_TEST_FAIL_REPORT_SIZE];
+u32 _IPA_UT_TEST_FAIL_REPORT_IDX;
+
+/**
+ * ipa_ut_print_log_buf() - Dump given buffer via kernel error mechanism
+ * @buf: Buffer to print
+ *
+ * Tokenize the string according to new-line and then print
+ *
+ * Note: Assumes lock acquired
+ */
+static void ipa_ut_print_log_buf(char *buf)
+{
+ char *token;
+
+ if (!buf) {
+ IPA_UT_ERR("Input error - no buf\n");
+ return;
+ }
+
+ for (token = strsep(&buf, "\n"); token; token = strsep(&buf, "\n"))
+ pr_err("%s\n", token);
+}
+
+/**
+ * ipa_ut_dump_fail_report_stack() - dump the report info stack via kernel err
+ *
+ * Note: Assumes lock acquired
+ */
+static void ipa_ut_dump_fail_report_stack(void)
+{
+ int i;
+
+ IPA_UT_DBG("Entry\n");
+
+ if (_IPA_UT_TEST_FAIL_REPORT_IDX == 0) {
+ IPA_UT_DBG("no report info\n");
+ return;
+ }
+
+ for (i = 0 ; i < _IPA_UT_TEST_FAIL_REPORT_IDX; i++) {
+ if (i == 0)
+ pr_err("***** FAIL INFO STACK *****:\n");
+ else
+ pr_err("Called From:\n");
+
+ pr_err("\tFILE = %s\n\tFUNC = %s()\n\tLINE = %d\n",
+ _IPA_UT_TEST_FAIL_REPORT_DATA[i].file,
+ _IPA_UT_TEST_FAIL_REPORT_DATA[i].func,
+ _IPA_UT_TEST_FAIL_REPORT_DATA[i].line);
+ pr_err("\t%s\n", _IPA_UT_TEST_FAIL_REPORT_DATA[i].info);
+ }
+}
+
+/**
+ * ipa_ut_show_suite_exec_summary() - Show tests run summary
+ * @suite: suite to print its running summary
+ *
+ * Print list of succeeded tests, failed tests and skipped tests
+ *
+ * Note: Assumes lock acquired
+ */
+static void ipa_ut_show_suite_exec_summary(const struct ipa_ut_suite *suite)
+{
+ int i;
+
+ IPA_UT_DBG("Entry\n");
+
+ ipa_assert_on(!suite);
+
+ pr_info("\n\n");
+ pr_info("\t Suite '%s' summary\n", suite->meta_data->name);
+ pr_info("===========================\n");
+ pr_info("Successful tests\n");
+ pr_info("----------------\n");
+ for (i = 0 ; i < suite->tests_cnt ; i++) {
+ if (suite->tests[i].res != IPA_UT_TEST_RES_SUCCESS)
+ continue;
+ pr_info("\t%s\n", suite->tests[i].name);
+ }
+ pr_info("\nFailed tests\n");
+ pr_info("------------\n");
+ for (i = 0 ; i < suite->tests_cnt ; i++) {
+ if (suite->tests[i].res != IPA_UT_TEST_RES_FAIL)
+ continue;
+ pr_info("\t%s\n", suite->tests[i].name);
+ }
+ pr_info("\nSkipped tests\n");
+ pr_info("-------------\n");
+ for (i = 0 ; i < suite->tests_cnt ; i++) {
+ if (suite->tests[i].res != IPA_UT_TEST_RES_SKIP)
+ continue;
+ pr_info("\t%s\n", suite->tests[i].name);
+ }
+ pr_info("\n");
+}
+
+/**
+ * ipa_ut_dbgfs_meta_test_write() - Debugfs write func for a meta test
+ * @params: write fops
+ *
+ * Used to run all/regression tests in a suite
+ * Create log buffer that the test can use to store ongoing logs
+ * IPA clocks need to be voted.
+ * Run setup() once before running the tests and teardown() once after.
+ * If no such callbacks exist, skip them; if one fails, fail the suite.
+ * Print test progress while running.
+ * Test log and fail report will be shown only if a test failed.
+ * Finally, show a summary of the suite's test run.
+ *
+ * Note: If a test's supported IPA H/W version range does not match, skip it
+ *	If a test lacks a run function, skip it
+ *	If a test is not a regression test and this is a regression run, skip it
+ * Note: Running mode: Do not stop running on failure
+ *
+ * Return: Negative in failure, given characters amount in success
+ */
+static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ipa_ut_suite *suite;
+ int i;
+ enum ipa_hw_type ipa_ver;
+ int rc = 0;
+ long meta_type;
+ bool tst_fail = false;
+
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+ suite = file->f_inode->i_private;
+ ipa_assert_on(!suite);
+ meta_type = (long)(file->private_data);
+ IPA_UT_DBG("Meta test type %ld\n", meta_type);
+
+ _IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE,
+ GFP_KERNEL);
+ if (!_IPA_UT_TEST_LOG_BUF_NAME) {
+ IPA_UT_ERR("failed to allocate %d bytes\n",
+ _IPA_UT_TEST_LOG_BUF_SIZE);
+ rc = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ if (!suite->tests_cnt || !suite->tests) {
+ pr_info("No tests for suite '%s'\n", suite->meta_data->name);
+ goto free_mem;
+ }
+
+ ipa_ver = ipa_get_hw_type();
+
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT");
+
+ if (suite->meta_data->setup) {
+ pr_info("*** Suite '%s': Run setup ***\n",
+ suite->meta_data->name);
+ rc = suite->meta_data->setup(&suite->meta_data->priv);
+ if (rc) {
+ IPA_UT_ERR("Setup failed for suite %s\n",
+ suite->meta_data->name);
+ rc = -EFAULT;
+ goto release_clock;
+ }
+ } else {
+ pr_info("*** Suite '%s': No Setup ***\n",
+ suite->meta_data->name);
+ }
+
+ pr_info("*** Suite '%s': Run %s tests ***\n\n",
+ suite->meta_data->name,
+ meta_type == IPA_UT_META_TEST_REGRESSION ? "regression" : "all"
+ );
+ for (i = 0 ; i < suite->tests_cnt ; i++) {
+ if (meta_type == IPA_UT_META_TEST_REGRESSION &&
+ !suite->tests[i].run_in_regression) {
+ pr_info(
+ "*** Test '%s': Skip - Not in regression ***\n\n"
+ , suite->tests[i].name);
+ suite->tests[i].res = IPA_UT_TEST_RES_SKIP;
+ continue;
+ }
+ if (suite->tests[i].min_ipa_hw_ver > ipa_ver ||
+ suite->tests[i].max_ipa_hw_ver < ipa_ver) {
+ pr_info(
+ "*** Test '%s': Skip - IPA VER mismatch ***\n\n"
+ , suite->tests[i].name);
+ suite->tests[i].res = IPA_UT_TEST_RES_SKIP;
+ continue;
+ }
+ if (!suite->tests[i].run) {
+ pr_info(
+ "*** Test '%s': Skip - No Run function ***\n\n"
+ , suite->tests[i].name);
+ suite->tests[i].res = IPA_UT_TEST_RES_SKIP;
+ continue;
+ }
+
+ _IPA_UT_TEST_LOG_BUF_NAME[0] = '\0';
+ _IPA_UT_TEST_FAIL_REPORT_IDX = 0;
+ pr_info("*** Test '%s': Running... ***\n",
+ suite->tests[i].name);
+ rc = suite->tests[i].run(suite->meta_data->priv);
+ if (rc) {
+ tst_fail = true;
+ suite->tests[i].res = IPA_UT_TEST_RES_FAIL;
+ ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME);
+ } else {
+ suite->tests[i].res = IPA_UT_TEST_RES_SUCCESS;
+ }
+
+ pr_info(">>>>>>**** TEST '%s': %s ****<<<<<<\n",
+			suite->tests[i].name, rc ? "FAIL" : "SUCCESS");
+
+		if (rc)
+ ipa_ut_dump_fail_report_stack();
+
+ pr_info("\n");
+ }
+
+ if (suite->meta_data->teardown) {
+ pr_info("*** Suite '%s': Run Teardown ***\n",
+ suite->meta_data->name);
+ rc = suite->meta_data->teardown(suite->meta_data->priv);
+ if (rc) {
+ IPA_UT_ERR("Teardown failed for suite %s\n",
+ suite->meta_data->name);
+ rc = -EFAULT;
+ goto release_clock;
+ }
+ } else {
+ pr_info("*** Suite '%s': No Teardown ***\n",
+ suite->meta_data->name);
+ }
+
+ ipa_ut_show_suite_exec_summary(suite);
+
+release_clock:
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT");
+free_mem:
+ kfree(_IPA_UT_TEST_LOG_BUF_NAME);
+ _IPA_UT_TEST_LOG_BUF_NAME = NULL;
+unlock_mutex:
+ mutex_unlock(&ipa_ut_ctx->lock);
+ return ((!rc && !tst_fail) ? count : -EFAULT);
+}
+
+/**
+ * ipa_ut_dbgfs_meta_test_read() - Debugfs read func for a meta test
+ * @params: read fops
+ *
+ * A meta test is a test that represents another test or a group of tests,
+ * for example the 'all' test. Running this test will run all
+ * the tests in the suite.
+ *
+ * Show information regarding the suite, e.g. name and description.
+ * If regression - list the names of the regression tests.
+ *
+ * Return: Amount of characters written to user space buffer
+ */
+static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ char *buf;
+ struct ipa_ut_suite *suite;
+ int nbytes;
+ ssize_t cnt;
+ long meta_type;
+ int i;
+
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+ suite = file->f_inode->i_private;
+ ipa_assert_on(!suite);
+ meta_type = (long)(file->private_data);
+ IPA_UT_DBG("Meta test type %ld\n", meta_type);
+
+ buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ IPA_UT_ERR("failed to allocate %d bytes\n",
+ IPA_UT_DEBUG_READ_BUF_SIZE);
+ cnt = 0;
+ goto unlock_mutex;
+ }
+
+ if (meta_type == IPA_UT_META_TEST_ALL) {
+ nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE,
+ "\tMeta-test running all the tests in the suite:\n"
+ "\tSuite Name: %s\n"
+ "\tDescription: %s\n"
+			"\tNumber of tests in suite: %zu\n",
+ suite->meta_data->name,
+ suite->meta_data->desc ?: "",
+ suite->tests_cnt);
+ } else {
+ nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE,
+ "\tMeta-test running regression tests in the suite:\n"
+ "\tSuite Name: %s\n"
+ "\tDescription: %s\n"
+ "\tRegression tests:\n",
+ suite->meta_data->name,
+ suite->meta_data->desc ?: "");
+ for (i = 0 ; i < suite->tests_cnt ; i++) {
+ if (!suite->tests[i].run_in_regression)
+ continue;
+ nbytes += scnprintf(buf + nbytes,
+ IPA_UT_DEBUG_READ_BUF_SIZE - nbytes,
+ "\t\t%s\n", suite->tests[i].name);
+ }
+ }
+
+ cnt = simple_read_from_buffer(ubuf, count, ppos, buf, nbytes);
+ kfree(buf);
+
+unlock_mutex:
+ mutex_unlock(&ipa_ut_ctx->lock);
+ return cnt;
+}
+
+/**
+ * ipa_ut_dbgfs_regression_test_open() - Debugfs open function for
+ * 'regression' tests
+ * @params: open fops
+ *
+ * Mark "Regression tests" for later meta-test operations.
+ *
+ * Return: Zero (always success).
+ */
+static int ipa_ut_dbgfs_regression_test_open(struct inode *inode,
+ struct file *filp)
+{
+ IPA_UT_DBG("Entry\n");
+
+ filp->private_data = (void *)(IPA_UT_META_TEST_REGRESSION);
+
+ return 0;
+}
+
+/**
+ * ipa_ut_dbgfs_all_test_open() - Debugfs open function for 'all' tests
+ * @params: open fops
+ *
+ * Mark "All tests" for later meta-test operations.
+ *
+ * Return: Zero (always success).
+ */
+static int ipa_ut_dbgfs_all_test_open(struct inode *inode,
+ struct file *filp)
+{
+ IPA_UT_DBG("Entry\n");
+
+ filp->private_data = (void *)(IPA_UT_META_TEST_ALL);
+
+ return 0;
+}
+
+/**
+ * ipa_ut_dbgfs_test_write() - Debugfs write function for a test
+ * @params: write fops
+ *
+ * Used to run a test.
+ * Create log buffer that the test can use to store ongoing logs
+ * IPA clocks need to be voted.
+ * Run setup() before the test and teardown() after it.
+ * If no such callbacks exist, skip them; if one fails, fail the test.
+ * If all succeed, nothing is printed to the user.
+ * If it failed, test logs and a failure report will be printed to the user.
+ *
+ * Note: A test must have a run function and its supported IPA H/W version
+ * range must match. Otherwise the test will fail.
+ *
+ * Return: Negative in failure, given characters amount in success
+ */
+static ssize_t ipa_ut_dbgfs_test_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ipa_ut_test *test;
+ struct ipa_ut_suite *suite;
+ bool tst_fail = false;
+ int rc = 0;
+ enum ipa_hw_type ipa_ver;
+
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+ test = file->f_inode->i_private;
+ ipa_assert_on(!test);
+
+ _IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE,
+ GFP_KERNEL);
+ if (!_IPA_UT_TEST_LOG_BUF_NAME) {
+ IPA_UT_ERR("failed to allocate %d bytes\n",
+ _IPA_UT_TEST_LOG_BUF_SIZE);
+ rc = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ if (!test->run) {
+ IPA_UT_ERR("*** Test %s - No run func ***\n",
+ test->name);
+ rc = -EFAULT;
+ goto free_mem;
+ }
+
+ ipa_ver = ipa_get_hw_type();
+ if (test->min_ipa_hw_ver > ipa_ver ||
+ test->max_ipa_hw_ver < ipa_ver) {
+ IPA_UT_ERR("Cannot run test %s on IPA HW Ver %s\n",
+ test->name, ipa_get_version_string(ipa_ver));
+ rc = -EFAULT;
+ goto free_mem;
+ }
+
+ suite = test->suite;
+ if (!suite || !suite->meta_data) {
+ IPA_UT_ERR("test %s with invalid suite\n", test->name);
+ rc = -EINVAL;
+ goto free_mem;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT");
+
+ if (suite->meta_data->setup) {
+ IPA_UT_DBG("*** Suite '%s': Run setup ***\n",
+ suite->meta_data->name);
+ rc = suite->meta_data->setup(&suite->meta_data->priv);
+ if (rc) {
+ IPA_UT_ERR("Setup failed for suite %s\n",
+ suite->meta_data->name);
+ rc = -EFAULT;
+ goto release_clock;
+ }
+ } else {
+ IPA_UT_DBG("*** Suite '%s': No Setup ***\n",
+ suite->meta_data->name);
+ }
+
+ IPA_UT_DBG("*** Test '%s': Running... ***\n", test->name);
+ _IPA_UT_TEST_FAIL_REPORT_IDX = 0;
+ rc = test->run(suite->meta_data->priv);
+ if (rc)
+ tst_fail = true;
+ IPA_UT_DBG("*** Test %s - ***\n", tst_fail ? "FAIL" : "SUCCESS");
+ if (tst_fail) {
+ pr_info("=================>>>>>>>>>>>\n");
+ ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME);
+ pr_info("**** TEST %s FAILED ****\n", test->name);
+ ipa_ut_dump_fail_report_stack();
+ pr_info("<<<<<<<<<<<=================\n");
+ }
+
+ if (suite->meta_data->teardown) {
+ IPA_UT_DBG("*** Suite '%s': Run Teardown ***\n",
+ suite->meta_data->name);
+ rc = suite->meta_data->teardown(suite->meta_data->priv);
+ if (rc) {
+ IPA_UT_ERR("Teardown failed for suite %s\n",
+ suite->meta_data->name);
+ rc = -EFAULT;
+ goto release_clock;
+ }
+ } else {
+ IPA_UT_DBG("*** Suite '%s': No Teardown ***\n",
+ suite->meta_data->name);
+ }
+
+release_clock:
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT");
+free_mem:
+ kfree(_IPA_UT_TEST_LOG_BUF_NAME);
+ _IPA_UT_TEST_LOG_BUF_NAME = NULL;
+unlock_mutex:
+ mutex_unlock(&ipa_ut_ctx->lock);
+ return ((!rc && !tst_fail) ? count : -EFAULT);
+}
+
+/**
+ * ipa_ut_dbgfs_test_read() - Debugfs read function for a test
+ * @params: read fops
+ *
+ * Print information regarding the test, e.g. name and description.
+ *
+ * Return: Amount of characters written to user space buffer
+ */
+static ssize_t ipa_ut_dbgfs_test_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ struct ipa_ut_test *test;
+ int nbytes;
+ ssize_t cnt;
+
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+ test = file->f_inode->i_private;
+ ipa_assert_on(!test);
+
+ buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ IPA_UT_ERR("failed to allocate %d bytes\n",
+ IPA_UT_DEBUG_READ_BUF_SIZE);
+ cnt = 0;
+ goto unlock_mutex;
+ }
+
+ nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE,
+ "\t Test Name: %s\n"
+ "\t Description: %s\n"
+ "\t Suite Name: %s\n"
+ "\t Run In Regression: %s\n"
+ "\t Supported IPA versions: [%s -> %s]\n",
+ test->name, test->desc ?: "", test->suite->meta_data->name,
+ test->run_in_regression ? "Yes" : "No",
+ ipa_get_version_string(test->min_ipa_hw_ver),
+ test->max_ipa_hw_ver == IPA_HW_MAX ? "MAX" :
+ ipa_get_version_string(test->max_ipa_hw_ver));
+
+ if (nbytes > count)
+ IPA_UT_ERR("User buf too small - return partial info\n");
+
+ cnt = simple_read_from_buffer(ubuf, count, ppos, buf, nbytes);
+ kfree(buf);
+
+unlock_mutex:
+ mutex_unlock(&ipa_ut_ctx->lock);
+ return cnt;
+}
+
+/**
+ * ipa_ut_framework_load_suites() - Load tests and expose them to user space
+ *
+ * Creates debugfs folder for each suite and then file for each test in it.
+ * Create debugfs "all" file for each suite for meta-test to run all tests.
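+ *
+ * An illustrative sketch of the resulting layout (assumes debugfs is
+ * mounted at /sys/kernel/debug and the IPA debugfs root is "ipa"):
+ *
+ *	/sys/kernel/debug/ipa/test/suites/<suite>/<test>
+ *	/sys/kernel/debug/ipa/test/suites/<suite>/all
+ *	/sys/kernel/debug/ipa/test/suites/<suite>/regression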
+ *
+ * Note: Assumes lock acquired
+ *
+ * Return: Zero on success, negative value on failure
+ */
+int ipa_ut_framework_load_suites(void)
+{
+ int suite_idx;
+ int tst_idx;
+ struct ipa_ut_suite *suite;
+ struct dentry *s_dent;
+ struct dentry *f_dent;
+
+ IPA_UT_DBG("Entry\n");
+
+ for (suite_idx = IPA_UT_SUITE_FIRST_INDEX;
+ suite_idx < IPA_UT_SUITES_COUNT; suite_idx++) {
+ suite = IPA_UT_GET_SUITE(suite_idx);
+
+ if (!suite->meta_data->name) {
+ IPA_UT_ERR("No suite name\n");
+ return -EFAULT;
+ }
+
+ s_dent = debugfs_create_dir(suite->meta_data->name,
+ ipa_ut_ctx->test_dbgfs_suites);
+
+ if (!s_dent || IS_ERR(s_dent)) {
+ IPA_UT_ERR("fail create dbg entry - suite %s\n",
+ suite->meta_data->name);
+ return -EFAULT;
+ }
+
+ for (tst_idx = 0; tst_idx < suite->tests_cnt ; tst_idx++) {
+ if (!suite->tests[tst_idx].name) {
+ IPA_UT_ERR("No test name on suite %s\n",
+ suite->meta_data->name);
+ return -EFAULT;
+ }
+ f_dent = debugfs_create_file(
+ suite->tests[tst_idx].name,
+ IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent,
+ &suite->tests[tst_idx],
+ &ipa_ut_dbgfs_test_fops);
+ if (!f_dent || IS_ERR(f_dent)) {
+ IPA_UT_ERR("fail create dbg entry - tst %s\n",
+ suite->tests[tst_idx].name);
+ return -EFAULT;
+ }
+ }
+
+ /* entry for meta-test all to run all tests in suites */
+ f_dent = debugfs_create_file(_IPA_UT_RUN_ALL_TEST_NAME,
+ IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent,
+ suite, &ipa_ut_dbgfs_all_test_fops);
+ if (!f_dent || IS_ERR(f_dent)) {
+ IPA_UT_ERR("fail to create dbg entry - %s\n",
+ _IPA_UT_RUN_ALL_TEST_NAME);
+ return -EFAULT;
+ }
+
+ /*
+ * entry for meta-test regression to run all regression
+ * tests in suites
+ */
+ f_dent = debugfs_create_file(_IPA_UT_RUN_REGRESSION_TEST_NAME,
+ IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent,
+ suite, &ipa_ut_dbgfs_regression_test_fops);
+ if (!f_dent || IS_ERR(f_dent)) {
+ IPA_UT_ERR("fail to create dbg entry - %s\n",
+				_IPA_UT_RUN_REGRESSION_TEST_NAME);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ipa_ut_framework_enable() - Enable the framework
+ *
+ * Creates the tests and suites debugfs entries and load them.
+ * This will expose the tests to user space.
+ *
+ * Return: Zero on success, negative value on failure
+ */
+static int ipa_ut_framework_enable(void)
+{
+ int ret = 0;
+
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+
+ if (ipa_ut_ctx->enabled) {
+ IPA_UT_ERR("Already enabled\n");
+ goto unlock_mutex;
+ }
+
+ ipa_ut_ctx->test_dbgfs_suites = debugfs_create_dir("suites",
+ ipa_ut_ctx->test_dbgfs_root);
+ if (!ipa_ut_ctx->test_dbgfs_suites ||
+ IS_ERR(ipa_ut_ctx->test_dbgfs_suites)) {
+ IPA_UT_ERR("failed to create suites debugfs dir\n");
+ ret = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ if (ipa_ut_framework_load_suites()) {
+ IPA_UT_ERR("failed to load the suites into debugfs\n");
+ ret = -EFAULT;
+ goto fail_clean_suites_dbgfs;
+ }
+
+ ipa_ut_ctx->enabled = true;
+ goto unlock_mutex;
+
+fail_clean_suites_dbgfs:
+ debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_suites);
+unlock_mutex:
+ mutex_unlock(&ipa_ut_ctx->lock);
+ return ret;
+}
+
+/**
+ * ipa_ut_framework_disable() - Disable the framework
+ *
+ * Remove the tests and suites debugfs exposure.
+ *
+ * Return: Zero on success, negative value on failure
+ */
+static int ipa_ut_framework_disable(void)
+{
+ int ret = 0;
+
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+
+ if (!ipa_ut_ctx->enabled) {
+ IPA_UT_ERR("Already disabled\n");
+ goto unlock_mutex;
+ }
+
+ debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_suites);
+
+ ipa_ut_ctx->enabled = false;
+
+unlock_mutex:
+ mutex_unlock(&ipa_ut_ctx->lock);
+ return ret;
+}
+
+/**
+ * ipa_ut_dbgfs_enable_write() - Debugfs enable file write fops
+ * @params: write fops
+ *
+ * Input should be number. If 0, then disable. Otherwise enable.
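+ *
+ * Illustrative usage from user space (a sketch; paths assume debugfs is
+ * mounted at /sys/kernel/debug and the IPA debugfs root is "ipa"):
+ *
+ *	echo 1 > /sys/kernel/debug/ipa/test/enable
+ *	echo 1 > /sys/kernel/debug/ipa/test/suites/mhi/all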
+ *
+ * Return: Negative value on failure; amount of given characters on success
+ */
+static ssize_t ipa_ut_dbgfs_enable_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ char lcl_buf[IPA_UT_DEBUG_WRITE_BUF_SIZE];
+ s8 option = 0;
+ int ret;
+
+ IPA_UT_DBG("Entry\n");
+
+ if (sizeof(lcl_buf) < count + 1) {
+		IPA_UT_ERR("Not enough space\n");
+ return -E2BIG;
+ }
+
+ if (copy_from_user(lcl_buf, buf, count)) {
+ IPA_UT_ERR("fail to copy buf from user space\n");
+ return -EFAULT;
+ }
+
+ lcl_buf[count] = '\0';
+ if (kstrtos8(lcl_buf, 0, &option)) {
+ IPA_UT_ERR("fail convert str to s8\n");
+ return -EINVAL;
+ }
+
+ if (option == 0)
+ ret = ipa_ut_framework_disable();
+ else
+ ret = ipa_ut_framework_enable();
+
+ return ret ?: count;
+}
+
+/**
+ * ipa_ut_dbgfs_enable_read() - Debugfs enable file read fops
+ * @params: read fops
+ *
+ * Shows user space whether the framework is enabled or disabled.
+ *
+ * Return: amount of characters returned to user space
+ */
+static ssize_t ipa_ut_dbgfs_enable_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ const char *status;
+
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+ status = ipa_ut_ctx->enabled ?
+ "Enabled - Write 0 to disable\n" :
+ "Disabled - Write 1 to enable\n";
+ mutex_unlock(&ipa_ut_ctx->lock);
+ return simple_read_from_buffer(ubuf, count, ppos,
+ status, strlen(status));
+}
+
+/**
+ * ipa_ut_framework_init() - Unit-tests framework initialization
+ *
+ * Complete tests initialization: each test needs to point to its
+ * corresponding suite.
+ * Creates the framework debugfs root directory under IPA directory.
+ * Create enable debugfs file - to enable/disable the framework.
+ *
+ * Return: Zero on success, negative value on failure
+ */
+static int ipa_ut_framework_init(void)
+{
+ struct dentry *dfile_enable;
+ int ret;
+ int suite_idx;
+ int test_idx;
+ struct ipa_ut_suite *suite;
+
+ IPA_UT_DBG("Entry\n");
+
+ ipa_assert_on(!ipa_ut_ctx);
+
+ ipa_ut_ctx->ipa_dbgfs_root = ipa_debugfs_get_root();
+ if (!ipa_ut_ctx->ipa_dbgfs_root) {
+ IPA_UT_ERR("No IPA debugfs root entry\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&ipa_ut_ctx->lock);
+
+	/* tests need to point to their corresponding suite structures */
+ for (suite_idx = IPA_UT_SUITE_FIRST_INDEX;
+ suite_idx < IPA_UT_SUITES_COUNT; suite_idx++) {
+ suite = IPA_UT_GET_SUITE(suite_idx);
+ ipa_assert_on(!suite);
+ if (!suite->tests) {
+ IPA_UT_DBG("No tests for suite %s\n",
+ suite->meta_data->name);
+ continue;
+ }
+ for (test_idx = 0; test_idx < suite->tests_cnt; test_idx++) {
+ suite->tests[test_idx].suite = suite;
+ IPA_UT_DBG("Updating test %s info for suite %s\n",
+ suite->tests[test_idx].name,
+ suite->meta_data->name);
+ }
+ }
+
+ ipa_ut_ctx->test_dbgfs_root = debugfs_create_dir("test",
+ ipa_ut_ctx->ipa_dbgfs_root);
+ if (!ipa_ut_ctx->test_dbgfs_root ||
+ IS_ERR(ipa_ut_ctx->test_dbgfs_root)) {
+ IPA_UT_ERR("failed to create test debugfs dir\n");
+ ret = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ dfile_enable = debugfs_create_file("enable",
+ IPA_UT_READ_WRITE_DBG_FILE_MODE,
+ ipa_ut_ctx->test_dbgfs_root, 0, &ipa_ut_dbgfs_enable_fops);
+ if (!dfile_enable || IS_ERR(dfile_enable)) {
+ IPA_UT_ERR("failed to create enable debugfs file\n");
+ ret = -EFAULT;
+ goto fail_clean_dbgfs;
+ }
+
+ _IPA_UT_TEST_FAIL_REPORT_IDX = 0;
+ ipa_ut_ctx->inited = true;
+ IPA_UT_DBG("Done\n");
+ ret = 0;
+ goto unlock_mutex;
+
+fail_clean_dbgfs:
+ debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_root);
+unlock_mutex:
+ mutex_unlock(&ipa_ut_ctx->lock);
+ return ret;
+}
+
+/**
+ * ipa_ut_framework_destroy() - Destroy the UT framework info
+ *
+ * Disable it if enabled.
+ * Remove the debugfs entries using the root entry
+ */
+static void ipa_ut_framework_destroy(void)
+{
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+ if (ipa_ut_ctx->enabled)
+ ipa_ut_framework_disable();
+ if (ipa_ut_ctx->inited)
+ debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_root);
+ mutex_unlock(&ipa_ut_ctx->lock);
+}
+
+/**
+ * ipa_ut_ipa_ready_cb() - IPA ready CB
+ *
+ * Once IPA is ready, start initializing the unit-test framework
+ */
+static void ipa_ut_ipa_ready_cb(void *user_data)
+{
+ IPA_UT_DBG("Entry\n");
+ (void)ipa_ut_framework_init();
+}
+
+/**
+ * ipa_ut_module_init() - Module init
+ *
+ * Create the framework context, wait for IPA driver readiness
+ * and initialize it.
+ * If the IPA driver is already ready, continue initialization immediately;
+ * if not, wait for the IPA ready notification from the IPA driver.
+ */
+static int __init ipa_ut_module_init(void)
+{
+ int ret;
+
+ IPA_UT_INFO("Loading IPA test module...\n");
+
+ ipa_ut_ctx = kzalloc(sizeof(struct ipa_ut_context), GFP_KERNEL);
+ if (!ipa_ut_ctx) {
+ IPA_UT_ERR("Failed to allocate ctx\n");
+ return -ENOMEM;
+ }
+ mutex_init(&ipa_ut_ctx->lock);
+
+ if (!ipa_is_ready()) {
+ IPA_UT_DBG("IPA driver not ready, registering callback\n");
+ ret = ipa_register_ipa_ready_cb(ipa_ut_ipa_ready_cb, NULL);
+
+ /*
+		 * If we received -EEXIST, IPA is already initialized,
+		 * so continue with framework initialization.
+ */
+ if (ret != -EEXIST) {
+ if (ret) {
+ IPA_UT_ERR("IPA CB reg failed - %d\n", ret);
+ kfree(ipa_ut_ctx);
+ ipa_ut_ctx = NULL;
+ }
+ return ret;
+ }
+ }
+
+ ret = ipa_ut_framework_init();
+ if (ret) {
+ IPA_UT_ERR("framework init failed\n");
+ kfree(ipa_ut_ctx);
+ ipa_ut_ctx = NULL;
+ }
+ return ret;
+}
+
+/**
+ * ipa_ut_module_exit() - Module exit function
+ *
+ * Destroys the Framework and removes its context
+ */
+static void ipa_ut_module_exit(void)
+{
+ IPA_UT_DBG("Entry\n");
+
+ if (!ipa_ut_ctx)
+ return;
+
+ ipa_ut_framework_destroy();
+ kfree(ipa_ut_ctx);
+ ipa_ut_ctx = NULL;
+}
+
+module_init(ipa_ut_module_init);
+module_exit(ipa_ut_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA Unit Test module");
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.h b/drivers/platform/msm/ipa/test/ipa_ut_framework.h
new file mode 100644
index 0000000..e3884d6
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.h
@@ -0,0 +1,240 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UT_FRAMEWORK_H_
+#define _IPA_UT_FRAMEWORK_H_
+
+#include <linux/kernel.h>
+#include "../ipa_common_i.h"
+#include "ipa_ut_i.h"
+
+#define IPA_UT_DRV_NAME "ipa_ut"
+
+#define IPA_UT_DBG(fmt, args...) \
+ do { \
+ pr_debug(IPA_UT_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_UT_DBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(IPA_UT_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_UT_ERR(fmt, args...) \
+ do { \
+ pr_err(IPA_UT_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_UT_INFO(fmt, args...) \
+ do { \
+ pr_info(IPA_UT_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+/**
+ * struct ipa_ut_tst_fail_report - Information on test failure
+ * @valid: When a test posts a report, valid will be marked true
+ * @file: File name containing the failed test.
+ * @line: Number of line in the file where the test failed.
+ * @func: Function where the test failed in.
+ * @info: Information about the failure.
+ */
+struct ipa_ut_tst_fail_report {
+ bool valid;
+ const char *file;
+ int line;
+ const char *func;
+ const char *info;
+};
+
+/**
+ * Report on test failure
+ * To be used by tests to report a point where a test fails.
+ * Failures are saved in a stack manner.
+ * Dumping the failure info will dump the fail reports
+ * from all the functions in the calling stack.
+ */
+#define IPA_UT_TEST_FAIL_REPORT(__info) \
+ do { \
+ extern struct ipa_ut_tst_fail_report \
+ _IPA_UT_TEST_FAIL_REPORT_DATA \
+ [_IPA_UT_TEST_FAIL_REPORT_SIZE]; \
+ extern u32 _IPA_UT_TEST_FAIL_REPORT_IDX; \
+ struct ipa_ut_tst_fail_report *entry; \
+ if (_IPA_UT_TEST_FAIL_REPORT_IDX >= \
+ _IPA_UT_TEST_FAIL_REPORT_SIZE) \
+ break; \
+ entry = &(_IPA_UT_TEST_FAIL_REPORT_DATA \
+ [_IPA_UT_TEST_FAIL_REPORT_IDX]); \
+ entry->file = __FILENAME__; \
+ entry->line = __LINE__; \
+ entry->func = __func__; \
+ if (__info) \
+ entry->info = __info; \
+ else \
+ entry->info = ""; \
+ _IPA_UT_TEST_FAIL_REPORT_IDX++; \
+ } while (0)
+
+/**
+ * To be used by tests to log progress and ongoing information.
+ * Logs are not printed to the user, but saved to a buffer.
+ * The framework prints the buffer on different occasions, e.g. on test failure
+ */
+#define IPA_UT_LOG(fmt, args...) \
+ do { \
+ extern char *_IPA_UT_TEST_LOG_BUF_NAME; \
+ char __buf[512]; \
+ IPA_UT_DBG(fmt, ## args); \
+ if (!_IPA_UT_TEST_LOG_BUF_NAME) {\
+ pr_err(IPA_UT_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ break; \
+ } \
+ scnprintf(__buf, sizeof(__buf), \
+ " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ strlcat(_IPA_UT_TEST_LOG_BUF_NAME, __buf, \
+ _IPA_UT_TEST_LOG_BUF_SIZE); \
+ } while (0)
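+
+/*
+ * Illustrative sketch of how a test typically combines IPA_UT_LOG() and
+ * IPA_UT_TEST_FAIL_REPORT(); my_do_transfer() is a hypothetical helper:
+ *
+ *	static int my_test(void *priv)
+ *	{
+ *		int rc;
+ *
+ *		rc = my_do_transfer(priv);
+ *		if (rc) {
+ *			IPA_UT_LOG("transfer failed rc=%d\n", rc);
+ *			IPA_UT_TEST_FAIL_REPORT("transfer failed");
+ *			return rc;
+ *		}
+ *		return 0;
+ *	}
+ */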
+
+/**
+ * struct ipa_ut_suite_meta - Suite meta-data
+ * @name: Suite unique name
+ * @desc: Suite description
+ * @setup: Setup Call-back of the suite
+ * @teardown: Teardown Call-back of the suite
+ * @priv: Private pointer of the suite
+ *
+ * Setup/Teardown will be called once for the suite when running its tests.
+ * priv field is shared between the Setup/Teardown and the tests
+ */
+struct ipa_ut_suite_meta {
+ char *name;
+ char *desc;
+ int (*setup)(void **ppriv);
+ int (*teardown)(void *priv);
+ void *priv;
+};
+
+/* Test suite data structure declaration */
+struct ipa_ut_suite;
+
+/**
+ * struct ipa_ut_test - Test information
+ * @name: Test name
+ * @desc: Test description
+ * @run: Test execution call-back
+ * @run_in_regression: Should this test run as part of a regression run
+ * @min_ipa_hw_ver: Minimum IPA H/W version on which the test is supported
+ * @max_ipa_hw_ver: Maximum IPA H/W version on which the test is supported
+ * @suite: Pointer to suite containing this test
+ * @res: Test execution result. Will be updated after running a test as part
+ * of suite tests run
+ */
+struct ipa_ut_test {
+ char *name;
+ char *desc;
+ int (*run)(void *priv);
+ bool run_in_regression;
+ int min_ipa_hw_ver;
+ int max_ipa_hw_ver;
+ struct ipa_ut_suite *suite;
+ enum ipa_ut_test_result res;
+};
+
+/**
+ * struct ipa_ut_suite - Suite information
+ * @meta_data: Pointer to meta-data structure of the suite
+ * @tests: Pointer to array of tests belongs to the suite
+ * @tests_cnt: Number of tests
+ */
+struct ipa_ut_suite {
+ struct ipa_ut_suite_meta *meta_data;
+ struct ipa_ut_test *tests;
+ size_t tests_cnt;
+};
+
+
+/**
+ * Add a test to a suite.
+ * Adds an entry to the suite's tests array and fills it with the
+ * given info, thus adding a new test.
+ */
+#define IPA_UT_ADD_TEST(__name, __desc, __run, __run_in_regression, \
+	__min_ipa_hw_ver, __max_ipa_hw_ver) \
+ { \
+ .name = #__name, \
+ .desc = __desc, \
+ .run = __run, \
+ .run_in_regression = __run_in_regression, \
+ .min_ipa_hw_ver = __min_ipa_hw_ver, \
+		.max_ipa_hw_ver = __max_ipa_hw_ver, \
+ .suite = NULL, \
+ }
+
+/**
+ * Declare a suite
+ * Every suite needs to be declared before it is registered.
+ */
+#define IPA_UT_DECLARE_SUITE(__name) \
+ extern struct ipa_ut_suite _IPA_UT_SUITE_DATA(__name)
+
+/**
+ * Register a suite
+ * Registering a suite is mandatory for it to be considered.
+ */
+#define IPA_UT_REGISTER_SUITE(__name) \
+ (&_IPA_UT_SUITE_DATA(__name))
+
+/**
+ * Start/End suite definition
+ * These create the suite's global structures and allow adding tests to it.
+ * Use IPA_UT_ADD_TEST() with these macros to add tests when defining
+ * a suite
+ */
+#define IPA_UT_DEFINE_SUITE_START(__name, __desc, __setup, __teardown) \
+ static struct ipa_ut_suite_meta _IPA_UT_SUITE_META_DATA(__name) = \
+ { \
+ .name = #__name, \
+ .desc = __desc, \
+ .setup = __setup, \
+ .teardown = __teardown, \
+ }; \
+ static struct ipa_ut_test _IPA_UT_SUITE_TESTS(__name)[] =
+#define IPA_UT_DEFINE_SUITE_END(__name) \
+ ; \
+ struct ipa_ut_suite _IPA_UT_SUITE_DATA(__name) = \
+ { \
+ .meta_data = &_IPA_UT_SUITE_META_DATA(__name), \
+ .tests = _IPA_UT_SUITE_TESTS(__name), \
+ .tests_cnt = ARRAY_SIZE(_IPA_UT_SUITE_TESTS(__name)), \
+ }
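+
+/*
+ * Illustrative sketch of a complete suite definition using the macros
+ * above (the suite/test names and callbacks here are hypothetical):
+ *
+ *	IPA_UT_DEFINE_SUITE_START(example, "Example suite",
+ *		example_suite_setup, example_suite_teardown)
+ *	{
+ *		IPA_UT_ADD_TEST(my_test, "My test description",
+ *			ipa_example_test_run,
+ *			true, IPA_HW_v3_0, IPA_HW_MAX),
+ *	} IPA_UT_DEFINE_SUITE_END(example);
+ */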
+
+#endif /* _IPA_UT_FRAMEWORK_H_ */
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_i.h b/drivers/platform/msm/ipa/test/ipa_ut_i.h
new file mode 100644
index 0000000..973debf
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_i.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UT_I_H_
+#define _IPA_UT_I_H_
+
+/* Suite data global structure name */
+#define _IPA_UT_SUITE_DATA(__name) ipa_ut_ ##__name ##_data
+
+/* Suite meta-data global structure name */
+#define _IPA_UT_SUITE_META_DATA(__name) ipa_ut_ ##__name ##_meta_data
+
+/* Suite global array of tests */
+#define _IPA_UT_SUITE_TESTS(__name) ipa_ut_ ##__name ##_tests
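+
+/*
+ * For example, _IPA_UT_SUITE_DATA(mhi) expands to ipa_ut_mhi_data,
+ * _IPA_UT_SUITE_META_DATA(mhi) to ipa_ut_mhi_meta_data and
+ * _IPA_UT_SUITE_TESTS(mhi) to ipa_ut_mhi_tests.
+ */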
+
+/* Global array of all suites */
+#define _IPA_UT_ALL_SUITES ipa_ut_all_suites_data
+
+/* Meta-test "all" name - test to run all tests in given suite */
+#define _IPA_UT_RUN_ALL_TEST_NAME "all"
+
+/**
+ * Meta-test "regression" name -
+ * test to run all regression tests in given suite
+ */
+#define _IPA_UT_RUN_REGRESSION_TEST_NAME "regression"
+
+
+/* Test Log buffer name and size */
+#define _IPA_UT_TEST_LOG_BUF_NAME ipa_ut_tst_log_buf
+#define _IPA_UT_TEST_LOG_BUF_SIZE 8192
+
+/* Global structure for test fail execution result information */
+#define _IPA_UT_TEST_FAIL_REPORT_DATA ipa_ut_tst_fail_report_data
+#define _IPA_UT_TEST_FAIL_REPORT_SIZE 5
+#define _IPA_UT_TEST_FAIL_REPORT_IDX ipa_ut_tst_fail_report_data_index
+
+/* Start/End definitions of the array of suites */
+#define IPA_UT_DEFINE_ALL_SUITES_START \
+ static struct ipa_ut_suite *_IPA_UT_ALL_SUITES[] =
+#define IPA_UT_DEFINE_ALL_SUITES_END
+
+/**
+ * Suites iterator - Array-like container
+ * First index, number of elements and element fetcher
+ */
+#define IPA_UT_SUITE_FIRST_INDEX 0
+#define IPA_UT_SUITES_COUNT \
+ ARRAY_SIZE(_IPA_UT_ALL_SUITES)
+#define IPA_UT_GET_SUITE(__index) \
+ _IPA_UT_ALL_SUITES[__index]
+
+/**
+ * enum ipa_ut_test_result - Test execution result
+ * @IPA_UT_TEST_RES_FAIL: Test executed and failed
+ * @IPA_UT_TEST_RES_SUCCESS: Test executed and succeeded
+ * @IPA_UT_TEST_RES_SKIP: Test was not executed.
+ *
+ * When running all tests in a suite, a specific test could
+ * be skipped and not executed. For example due to mismatch of
+ * IPA H/W version.
+ */
+enum ipa_ut_test_result {
+ IPA_UT_TEST_RES_FAIL,
+ IPA_UT_TEST_RES_SUCCESS,
+ IPA_UT_TEST_RES_SKIP,
+};
+
+/**
+ * enum ipa_ut_meta_test_type - Type of suite meta-test
+ * @IPA_UT_META_TEST_ALL: Represents all tests in suite
+ * @IPA_UT_META_TEST_REGRESSION: Represents all regression tests in suite
+ */
+enum ipa_ut_meta_test_type {
+ IPA_UT_META_TEST_ALL,
+ IPA_UT_META_TEST_REGRESSION,
+};
+
+#endif /* _IPA_UT_I_H_ */
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
new file mode 100644
index 0000000..944800f
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UT_SUITE_LIST_H_
+#define _IPA_UT_SUITE_LIST_H_
+
+#include "ipa_ut_framework.h"
+#include "ipa_ut_i.h"
+
+/**
+ * Declare every suite here so that it can be registered below.
+ * Order is not important.
+ */
+IPA_UT_DECLARE_SUITE(mhi);
+IPA_UT_DECLARE_SUITE(example);
+
+
+/**
+ * Register every suite inside the below block.
+ * Unregistered suites will be ignored
+ */
+IPA_UT_DEFINE_ALL_SUITES_START
+{
+ IPA_UT_REGISTER_SUITE(mhi),
+ IPA_UT_REGISTER_SUITE(example),
+} IPA_UT_DEFINE_ALL_SUITES_END;
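+
+/*
+ * Illustrative sketch: hooking up a new suite named "foo" (a hypothetical
+ * name) requires both a declaration above and a registration in the block:
+ *
+ *	IPA_UT_DECLARE_SUITE(foo);
+ *	...
+ *	IPA_UT_REGISTER_SUITE(foo),
+ */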
+
+#endif /* _IPA_UT_SUITE_LIST_H_ */
diff --git a/include/linux/ecm_ipa.h b/include/linux/ecm_ipa.h
new file mode 100644
index 0000000..9fe9c37
--- /dev/null
+++ b/include/linux/ecm_ipa.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ECM_IPA_H_
+#define _ECM_IPA_H_
+
+#include <linux/ipa.h>
+
+/*
+ * @priv: private data given upon ipa_connect
+ * @evt: event enum, should be IPA_WRITE_DONE
+ * @data: for tx path the data field is the sent socket buffer.
+ */
+typedef void (*ecm_ipa_callback)(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+
+/*
+ * struct ecm_ipa_params - parameters for ecm_ipa initialization API
+ *
+ * @device_ready_notify: callback supplied by USB core driver.
+ * This callback shall be called by the Netdev once the device
+ * is ready to receive data from tethered PC.
+ * @ecm_ipa_rx_dp_notify: ecm_ipa will set this callback (out parameter).
+ * this callback shall be supplied for ipa_connect upon pipe
+ * connection (USB->IPA), once IPA driver receive data packets
+ * from USB pipe destined for Apps this callback will be called.
+ * @ecm_ipa_tx_dp_notify: ecm_ipa will set this callback (out parameter).
+ * this callback shall be supplied for ipa_connect upon pipe
+ * connection (IPA->USB), once IPA driver send packets destined
+ * for USB, IPA BAM will notify for Tx-complete.
+ * @private: ecm_ipa will set this pointer (out parameter).
+ * This pointer will hold the network device for later interaction
+ * with ecm_ipa APIs
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ * @skip_ep_cfg: boolean field that determines if Apps-processor
+ * should or should not configure this end-point.
+ */
+struct ecm_ipa_params {
+ void (*device_ready_notify)(void);
+ ecm_ipa_callback ecm_ipa_rx_dp_notify;
+ ecm_ipa_callback ecm_ipa_tx_dp_notify;
+ u8 host_ethaddr[ETH_ALEN];
+ u8 device_ethaddr[ETH_ALEN];
+ void *private;
+ bool skip_ep_cfg;
+};
+
+
+#ifdef CONFIG_ECM_IPA
+
+int ecm_ipa_init(struct ecm_ipa_params *params);
+
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+ void *priv);
+
+int ecm_ipa_disconnect(void *priv);
+
+void ecm_ipa_cleanup(void *priv);
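+
+/*
+ * Illustrative init/connect flow (a sketch; the pipe handles, MAC arrays
+ * and usb_device_ready() callback are hypothetical caller-side entities):
+ *
+ *	struct ecm_ipa_params params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.device_ready_notify = usb_device_ready;
+ *	memcpy(params.host_ethaddr, host_mac, ETH_ALEN);
+ *	memcpy(params.device_ethaddr, dev_mac, ETH_ALEN);
+ *	if (ecm_ipa_init(&params))
+ *		goto fail;
+ *	if (ecm_ipa_connect(usb_to_ipa_hdl, ipa_to_usb_hdl, params.private))
+ *		goto fail;
+ */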
+
+#else /* CONFIG_ECM_IPA*/
+
+static inline int ecm_ipa_init(struct ecm_ipa_params *params)
+{
+ return 0;
+}
+
+static inline int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+ void *priv)
+{
+ return 0;
+}
+
+static inline int ecm_ipa_disconnect(void *priv)
+{
+ return 0;
+}
+
+static inline void ecm_ipa_cleanup(void *priv)
+{
+
+}
+#endif /* CONFIG_ECM_IPA */
+
+#endif /* _ECM_IPA_H_ */
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
new file mode 100644
index 0000000..37d18e9
--- /dev/null
+++ b/include/linux/ipa.h
@@ -0,0 +1,2175 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/msm_ipa.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/msm-sps.h>
+#include <linux/if_ether.h>
+#include "linux/msm_gsi.h"
+
+#define IPA_APPS_MAX_BW_IN_MBPS 700
+/**
+ * enum ipa_transport_type
+ * transport type: either GSI or SPS
+ */
+enum ipa_transport_type {
+ IPA_TRANSPORT_TYPE_SPS,
+ IPA_TRANSPORT_TYPE_GSI
+};
+
+/**
+ * enum ipa_nat_en_type - NAT setting type in IPA end-point
+ */
+enum ipa_nat_en_type {
+ IPA_BYPASS_NAT,
+ IPA_SRC_NAT,
+ IPA_DST_NAT,
+};
+
+/**
+ * enum ipa_mode_type - mode setting type in IPA end-point
+ * @BASIC: basic mode
+ * @ENABLE_FRAMING_HDLC: not currently supported
+ * @ENABLE_DEFRAMING_HDLC: not currently supported
+ * @DMA: all data arriving at IPA will not go through IPA logic blocks; this
+ *	allows IPA to work as DMA for specific pipes.
+ */
+enum ipa_mode_type {
+ IPA_BASIC,
+ IPA_ENABLE_FRAMING_HDLC,
+ IPA_ENABLE_DEFRAMING_HDLC,
+ IPA_DMA,
+};
+
+/**
+ * enum ipa_aggr_en_type - aggregation setting type in IPA
+ * end-point
+ */
+enum ipa_aggr_en_type {
+ IPA_BYPASS_AGGR,
+ IPA_ENABLE_AGGR,
+ IPA_ENABLE_DEAGGR,
+};
+
+/**
+ * enum ipa_aggr_type - type of aggregation in IPA end-point
+ */
+enum ipa_aggr_type {
+ IPA_MBIM_16 = 0,
+ IPA_HDLC = 1,
+ IPA_TLP = 2,
+ IPA_RNDIS = 3,
+ IPA_GENERIC = 4,
+ IPA_QCMAP = 6,
+};
+
+/**
+ * enum ipa_aggr_mode - global aggregation mode
+ */
+enum ipa_aggr_mode {
+ IPA_MBIM_AGGR,
+ IPA_QCNCM_AGGR,
+};
+
+/**
+ * enum ipa_dp_evt_type - type of event client callback is
+ * invoked for on data path
+ * @IPA_RECEIVE: data is struct sk_buff
+ * @IPA_WRITE_DONE: data is struct sk_buff
+ * @IPA_CLIENT_START_POLL: client should start polling (NAPI)
+ * @IPA_CLIENT_COMP_NAPI: client completed the NAPI polling cycle
+ */
+enum ipa_dp_evt_type {
+ IPA_RECEIVE,
+ IPA_WRITE_DONE,
+ IPA_CLIENT_START_POLL,
+ IPA_CLIENT_COMP_NAPI,
+};
+
+/**
+ * enum hdr_total_len_or_pad_type - type of value held by TOTAL_LEN_OR_PAD
+ * field in header configuration register.
+ * @IPA_HDR_PAD: field is used as padding length
+ * @IPA_HDR_TOTAL_LEN: field is used as total length
+ */
+enum hdr_total_len_or_pad_type {
+ IPA_HDR_PAD = 0,
+ IPA_HDR_TOTAL_LEN = 1,
+};
+
+/**
+ * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
+ * @nat_en: This defines the default NAT mode for the pipe: in case of
+ * filter miss - the default NAT mode defines the NATing operation
+ * on the packet. Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_nat {
+ enum ipa_nat_en_type nat_en;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr - header configuration in IPA end-point
+ *
+ * @hdr_len: Header length in bytes to be added/removed. Assuming
+ * header len is constant per endpoint. Valid for
+ * both Input and Output Pipes
+ * @hdr_ofst_metadata_valid: 0: Metadata_Ofst value is invalid, i.e., no
+ * metadata within header.
+ * 1: Metadata_Ofst value is valid, i.e., metadata
+ * within header is in offset Metadata_Ofst Valid
+ * for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_ofst_metadata: Offset within header in which metadata resides
+ * Size of metadata - 4bytes
+ * Example - Stream ID/SSID/mux ID.
+ * Valid for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_additional_const_len: Defines the constant length that should be added
+ * to the payload length in order for IPA to update
+ * correctly the length field within the header
+ * (valid only in case Hdr_Ofst_Pkt_Size_Valid=1)
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size_valid: 0: Hdr_Ofst_Pkt_Size value is invalid, i.e., no
+ * length field within the inserted header
+ * 1: Hdr_Ofst_Pkt_Size value is valid, i.e., a
+ * packet length field resides within the header
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size: Offset within header in which the packet size resides.
+ *	Upon Header Insertion, IPA will update this field within the
+ *	header with the packet length. Assumption is that the
+ *	header length field size is constant and is 2 bytes.
+ *	Valid for Output Pipes (IPA Producer)
+ * @hdr_a5_mux: Determines whether A5 Mux header should be added to the packet.
+ * This bit is valid only when Hdr_En=01(Header Insertion)
+ * SW should set this bit for IPA-to-A5 pipes.
+ * 0: Do not insert A5 Mux Header
+ * 1: Insert A5 Mux Header
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_remove_additional: bool switch, remove more of the header
+ * based on the aggregation configuration (register
+ * HDR_LEN_INC_DEAGG_HDR)
+ * @hdr_metadata_reg_valid: bool switch, metadata from
+ * register INIT_HDR_METADATA_n is valid.
+ * (relevant only for IPA Consumer pipes)
+ */
+struct ipa_ep_cfg_hdr {
+ u32 hdr_len;
+ u32 hdr_ofst_metadata_valid;
+ u32 hdr_ofst_metadata;
+ u32 hdr_additional_const_len;
+ u32 hdr_ofst_pkt_size_valid;
+ u32 hdr_ofst_pkt_size;
+ u32 hdr_a5_mux;
+ u32 hdr_remove_additional;
+ u32 hdr_metadata_reg_valid;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr_ext - extended header configuration in IPA end-point
+ * @hdr_pad_to_alignment: Pad packet to specified alignment
+ * (2^pad to alignment value), i.e. value of 3 means pad to 2^3 = 8 bytes
+ * alignment. Supported alignments range from 0 up to 32 bytes (IPAv2 does
+ * not support 64-byte alignment). Valid for Output Pipes only (IPA Producer).
+ * @hdr_total_len_or_pad_offset: Offset to length field containing either
+ * total length or pad length, per hdr_total_len_or_pad config
+ * @hdr_payload_len_inc_padding: 0-IPA_ENDP_INIT_HDR_n's
+ * HDR_OFST_PKT_SIZE does
+ *	not include padding bytes size, payload_len = packet length,
+ * 1-IPA_ENDP_INIT_HDR_n's HDR_OFST_PKT_SIZE includes
+ * padding bytes size, payload_len = packet length + padding
+ * @hdr_total_len_or_pad: field is used as PAD length or as Total length
+ * (header + packet + padding)
+ * @hdr_total_len_or_pad_valid: 0-Ignore TOTAL_LEN_OR_PAD field, 1-Process
+ * TOTAL_LEN_OR_PAD field
+ * @hdr_little_endian: 0-Big Endian, 1-Little Endian
+ */
+struct ipa_ep_cfg_hdr_ext {
+ u32 hdr_pad_to_alignment;
+ u32 hdr_total_len_or_pad_offset;
+ bool hdr_payload_len_inc_padding;
+ enum hdr_total_len_or_pad_type hdr_total_len_or_pad;
+ bool hdr_total_len_or_pad_valid;
+ bool hdr_little_endian;
+};
+
+/**
+ * struct ipa_ep_cfg_mode - mode configuration in IPA end-point
+ * @mode: Valid for Input Pipes only (IPA Consumer)
+ * @dst: This parameter specifies the output pipe to which the packets
+ *	will be routed.
+ * This parameter is valid for Mode=DMA and not valid for
+ * Mode=Basic
+ * Valid for Input Pipes only (IPA Consumer)
+ */
+struct ipa_ep_cfg_mode {
+ enum ipa_mode_type mode;
+ enum ipa_client_type dst;
+};
+
+/**
+ * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point
+ *
+ * @aggr_en: Valid for both Input and Output Pipes
+ * @aggr: aggregation type (Valid for both Input and Output Pipes)
+ * @aggr_byte_limit: Limit of aggregated packet size in KB (<=32KB). When set
+ *	to 0, there is no size limitation on the aggregation.
+ *	When both Aggr_Byte_Limit and Aggr_Time_Limit are set
+ *	to 0, there is no aggregation; every packet is sent
+ *	independently according to the aggregation structure
+ *	Valid for Output Pipes only (IPA Producer)
+ * @aggr_time_limit: Timer to close aggregated packet (<=32ms). When set to 0,
+ *	there is no time limitation on the aggregation. When
+ *	both Aggr_Byte_Limit and Aggr_Time_Limit are set to 0,
+ *	there is no aggregation; every packet is sent
+ *	independently according to the aggregation structure
+ *	Valid for Output Pipes only (IPA Producer)
+ * @aggr_pkt_limit: Defines the maximum number of packets that may be
+ *	aggregated into a single frame. When set to 0, there is
+ *	no packet-count limitation on the aggregation.
+ *	Valid for Output Pipes only (IPA Producer).
+ * @aggr_hard_byte_limit_en: If set to 1, byte-limit aggregation for this
+ * pipe will apply a hard-limit behavior which will not
+ * allow frames to be closed with more than byte-limit
+ * bytes. If set to 0, previous byte-limit behavior
+ * will apply - frames close once a packet causes the
+ * accumulated byte-count to cross the byte-limit
+ * threshold (closed frame will contain that packet).
+ * @aggr_sw_eof_active: 0: EOF does not close aggregation. HW closes aggregation
+ * (sends EOT) only based on its aggregation config
+ * (byte/time limit, etc).
+ * 1: EOF closes aggregation in addition to HW based
+ * aggregation closure. Valid for Output Pipes only (IPA
+ * Producer). EOF affects only Pipes configured for generic
+ * aggregation.
+ */
+struct ipa_ep_cfg_aggr {
+ enum ipa_aggr_en_type aggr_en;
+ enum ipa_aggr_type aggr;
+ u32 aggr_byte_limit;
+ u32 aggr_time_limit;
+ u32 aggr_pkt_limit;
+ u32 aggr_hard_byte_limit_en;
+ bool aggr_sw_eof_active;
+};
+
+/**
+ * struct ipa_ep_cfg_route - route configuration in IPA end-point
+ * @rt_tbl_hdl: Defines the default routing table index to be used in case there
+ * is no filter rule matching, valid for Input Pipes only (IPA
+ * Consumer). Clients should set this to 0 which will cause default
+ * v4 and v6 routes setup internally by IPA driver to be used for
+ * this end-point
+ */
+struct ipa_ep_cfg_route {
+ u32 rt_tbl_hdl;
+};
+
+/**
+ * struct ipa_ep_cfg_holb - head of line blocking configuration in IPA end-point
+ * @en: enable(1 => ok to drop pkt)/disable(0 => never drop pkt)
+ * @tmr_val: duration in units of 128 IPA clk clock cycles [0,511], 1 clk=1.28us
+ *	IPAv2.5 supports a 32 bit HOLB timeout value; previous versions
+ *	support 16 bit
+ */
+struct ipa_ep_cfg_holb {
+ u16 en;
+ u32 tmr_val;
+};
+
+/**
+ * struct ipa_ep_cfg_deaggr - deaggregation configuration in IPA end-point
+ * @deaggr_hdr_len: Deaggregation Header length in bytes. Valid only for Input
+ * Pipes, which are configured for 'Generic' deaggregation.
+ * @packet_offset_valid: 0: PACKET_OFFSET is not used, 1: PACKET_OFFSET is
+ *	used.
+ * @packet_offset_location: Location of packet offset field, which specifies
+ *	the offset to the packet from the start of the packet offset field.
+ * @max_packet_len: DEAGGR Max Packet Length in Bytes. A Packet with a larger
+ *	size will be treated as an error. 0 - Packet Length is not Bound;
+ *	IPA should not check for a Max Packet Length.
+ */
+struct ipa_ep_cfg_deaggr {
+ u32 deaggr_hdr_len;
+ bool packet_offset_valid;
+ u32 packet_offset_location;
+ u32 max_packet_len;
+};
+
+/**
+ * enum ipa_cs_offload - checksum offload setting
+ */
+enum ipa_cs_offload {
+ IPA_DISABLE_CS_OFFLOAD,
+ IPA_ENABLE_CS_OFFLOAD_UL,
+ IPA_ENABLE_CS_OFFLOAD_DL,
+ IPA_CS_RSVD
+};
+
+/**
+ * struct ipa_ep_cfg_cfg - IPA ENDP_INIT Configuration register
+ * @frag_offload_en: 0 - IP packet fragment handling is disabled. IP packet
+ * fragments should be sent to SW. SW is responsible for
+ * configuring filter rules, and IP packet filter exception should be
+ * used to send all fragments to SW. 1 - IP packet fragment
+ * handling is enabled. IPA checks for fragments and uses frag
+ * rules table for processing fragments. Valid only for Input Pipes
+ * (IPA Consumer)
+ * @cs_offload_en: Checksum offload enable: 00: Disable checksum offload, 01:
+ * Enable checksum calculation offload (UL) - For output pipe
+ * (IPA producer) specifies that checksum trailer is to be added.
+ * For input pipe (IPA consumer) specifies presence of checksum
+ * header and IPA checksum calculation accordingly. 10: Enable
+ * checksum calculation offload (DL) - For output pipe (IPA
+ * producer) specifies that checksum trailer is to be added. For
+ * input pipe (IPA consumer) specifies IPA checksum calculation.
+ * 11: Reserved
+ * @cs_metadata_hdr_offset: Offset in Words (4 bytes) within header in which
+ * checksum meta info header (4 bytes) starts (UL). Values are 0-15, which
+ * mean 0 - 60 byte checksum header offset. Valid for input
+ * pipes only (IPA consumer)
+ * @gen_qmb_master_sel: Select bit for ENDP GEN-QMB master. This is used to
+ *	separate DDR & PCIe transactions in order to limit them as
+ *	a group (using the MAX_WRITES/READS limitation). Valid for input and
+ * output pipes (IPA consumer+producer)
+ */
+struct ipa_ep_cfg_cfg {
+ bool frag_offload_en;
+ enum ipa_cs_offload cs_offload_en;
+ u8 cs_metadata_hdr_offset;
+ u8 gen_qmb_master_sel;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata_mask - Endpoint initialization hdr metadata mask
+ * @metadata_mask: Mask specifying which metadata bits to write to
+ * IPA_ENDP_INIT_HDR_n.s HDR_OFST_METADATA. Only
+ * masked metadata bits (set to 1) will be written. Valid for Output
+ * Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_metadata_mask {
+ u32 metadata_mask;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata - Meta Data configuration in IPA end-point
+ * @qmap_id: qmap id
+ */
+struct ipa_ep_cfg_metadata {
+ u32 qmap_id;
+};
+
+/**
+ * struct ipa_ep_cfg_seq - HPS/DPS sequencer type configuration in IPA end-point
+ * @set_dynamic: 0 - HPS/DPS seq type is configured statically,
+ * 1 - HPS/DPS seq type is set to seq_type
+ * @seq_type: HPS/DPS sequencer type configuration
+ */
+struct ipa_ep_cfg_seq {
+ bool set_dynamic;
+ int seq_type;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat: NAT parameters
+ * @hdr: Header parameters
+ * @hdr_ext: Extended header parameters
+ * @mode: Mode parameters
+ * @aggr: Aggregation parameters
+ * @deaggr: Deaggregation params
+ * @route: Routing parameters
+ * @cfg: Configuration register data
+ * @metadata_mask: Hdr metadata mask
+ * @meta: Meta Data
+ * @seq: HPS/DPS sequencers configuration
+ */
+struct ipa_ep_cfg {
+ struct ipa_ep_cfg_nat nat;
+ struct ipa_ep_cfg_hdr hdr;
+ struct ipa_ep_cfg_hdr_ext hdr_ext;
+ struct ipa_ep_cfg_mode mode;
+ struct ipa_ep_cfg_aggr aggr;
+ struct ipa_ep_cfg_deaggr deaggr;
+ struct ipa_ep_cfg_route route;
+ struct ipa_ep_cfg_cfg cfg;
+ struct ipa_ep_cfg_metadata_mask metadata_mask;
+ struct ipa_ep_cfg_metadata meta;
+ struct ipa_ep_cfg_seq seq;
+};
+
+/**
+ * struct ipa_ep_cfg_ctrl - Control configuration in IPA end-point
+ * @ipa_ep_suspend: 0 - ENDP is enabled, 1 - ENDP is suspended (disabled).
+ * Valid for PROD Endpoints
+ * @ipa_ep_delay: 0 - ENDP is free-running, 1 - ENDP is delayed.
+ *	SW controls the data flow of an endpoint using this bit.
+ * Valid for CONS Endpoints
+ */
+struct ipa_ep_cfg_ctrl {
+ bool ipa_ep_suspend;
+ bool ipa_ep_delay;
+};
+
+/**
+ * IPA_NUM_OF_FIFO_DESC() - number of FIFO descriptors fitting in x bytes
+ * @x: FIFO size in bytes
+ */
+#define IPA_NUM_OF_FIFO_DESC(x) ((x) / sizeof(struct sps_iovec))
+typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+
+/**
+ * struct ipa_connect_params - low-level client connect input parameters. Either
+ * client allocates the data and desc FIFO and specifies that in data+desc OR
+ * specifies sizes and pipe_mem pref and IPA does the allocation.
+ *
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: type of "client"
+ * @client_bam_hdl: client SPS handle
+ * @client_ep_idx: client PER EP index
+ * @priv: callback cookie
+ * @notify: callback
+ *	priv - callback cookie
+ *	evt - type of event
+ *	data - data relevant to event. May not be valid. See event_type
+ *	enum for valid cases.
+ * @desc_fifo_sz: size of desc FIFO
+ * @data_fifo_sz: size of data FIFO
+ * @pipe_mem_preferred: if true, try to alloc the FIFOs in pipe mem, fallback
+ * to sys mem if pipe mem alloc fails
+ * @desc: desc FIFO meta-data when client has allocated it
+ * @data: data FIFO meta-data when client has allocated it
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ * by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ */
+struct ipa_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ unsigned long client_bam_hdl;
+ u32 client_ep_idx;
+ void *priv;
+ ipa_notify_cb notify;
+ u32 desc_fifo_sz;
+ u32 data_fifo_sz;
+ bool pipe_mem_preferred;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+ bool skip_ep_cfg;
+ bool keep_ipa_awake;
+};
+
+/**
+ * struct ipa_sps_params - SPS related output parameters resulting from
+ * low/high level client connect
+ * @ipa_bam_hdl: IPA SPS handle
+ * @ipa_ep_idx: IPA PER EP index
+ * @desc: desc FIFO meta-data
+ * @data: data FIFO meta-data
+ */
+struct ipa_sps_params {
+ unsigned long ipa_bam_hdl;
+ u32 ipa_ep_idx;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+};
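+
+/*
+ * Low-level connect sketch (illustrative; FIFO sizes are placeholders and
+ * IPA is left to allocate the FIFOs, so desc/data are not pre-filled):
+ *
+ *	struct ipa_connect_params in;
+ *	struct ipa_sps_params sps;
+ *	u32 clnt_hdl;
+ *
+ *	memset(&in, 0, sizeof(in));
+ *	in.client = IPA_CLIENT_USB_PROD;
+ *	in.desc_fifo_sz = 0x800;
+ *	in.data_fifo_sz = 0x2000;
+ *	ret = ipa_connect(&in, &sps, &clnt_hdl);
+ *
+ * On success, sps.ipa_bam_hdl, sps.desc and sps.data describe the IPA side
+ * of the connection for the client's SPS setup; ipa_disconnect(clnt_hdl)
+ * tears the pipe down.
+ */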
+
+/**
+ * struct ipa_tx_intf - interface tx properties
+ * @num_props: number of tx properties
+ * @prop: the tx properties array
+ */
+struct ipa_tx_intf {
+ u32 num_props;
+ struct ipa_ioc_tx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_rx_intf - interface rx properties
+ * @num_props: number of rx properties
+ * @prop: the rx properties array
+ */
+struct ipa_rx_intf {
+ u32 num_props;
+ struct ipa_ioc_rx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_ext_intf - interface ext properties
+ * @excp_pipe_valid: is next field valid?
+ * @excp_pipe: exception packets should be routed to this pipe
+ * @num_props: number of ext properties
+ * @prop: the ext properties array
+ */
+struct ipa_ext_intf {
+ bool excp_pipe_valid;
+ enum ipa_client_type excp_pipe;
+ u32 num_props;
+ struct ipa_ioc_ext_intf_prop *prop;
+};
+
+/**
+ * struct ipa_sys_connect_params - information needed to setup an IPA end-point
+ * in system-BAM mode
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: the type of client who "owns" the EP
+ * @desc_fifo_sz: size of desc FIFO. This number is used to allocate the desc
+ * fifo for BAM. For GSI, this size is used by IPA driver as a
+ * baseline to calculate the GSI ring size in the following way:
+ *		For PROD pipes, GSI ring is 4 * desc_fifo_sz.
+ *		For CONS pipes, GSI ring is 2 * desc_fifo_sz.
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie
+ * evt - type of event
+ * data - data relevant to event. May not be valid. See event_type
+ * enum for valid cases.
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ * by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @napi_enabled: when true, IPA calls the client callback to start polling
+ */
+struct ipa_sys_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ u32 desc_fifo_sz;
+ void *priv;
+ ipa_notify_cb notify;
+ bool skip_ep_cfg;
+ bool keep_ipa_awake;
+ bool napi_enabled;
+};
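+
+/*
+ * System-pipe setup sketch (illustrative; handle_skb() is a hypothetical
+ * client routine and the FIFO size is a placeholder):
+ *
+ *	static void wan_notify(void *priv, enum ipa_dp_evt_type evt,
+ *		unsigned long data)
+ *	{
+ *		if (evt == IPA_RECEIVE)
+ *			handle_skb((struct sk_buff *)data);
+ *	}
+ *
+ *	struct ipa_sys_connect_params sys_in;
+ *	u32 clnt_hdl;
+ *
+ *	memset(&sys_in, 0, sizeof(sys_in));
+ *	sys_in.client = IPA_CLIENT_APPS_WAN_CONS;
+ *	sys_in.desc_fifo_sz = 0x800;
+ *	sys_in.notify = wan_notify;
+ *	ret = ipa_setup_sys_pipe(&sys_in, &clnt_hdl);
+ *
+ * ipa_teardown_sys_pipe(clnt_hdl) releases the pipe when done.
+ */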
+
+/**
+ * struct ipa_tx_meta - meta-data for the TX packet
+ * @pkt_init_dst_ep: destination endpoint used for IP_PACKET_INIT based TX
+ * @pkt_init_dst_ep_valid: is above field valid?
+ * @pkt_init_dst_ep_remote: is the destination endpoint a remote one
+ * @dma_address: dma mapped address of TX packet
+ * @dma_address_valid: is above field valid?
+ */
+struct ipa_tx_meta {
+ u8 pkt_init_dst_ep;
+ bool pkt_init_dst_ep_valid;
+ bool pkt_init_dst_ep_remote;
+ dma_addr_t dma_address;
+ bool dma_address_valid;
+};
+
+/**
+ * typedef ipa_msg_free_fn - callback function
+ * @param buff - [in] the message payload to free
+ * @param len - [in] size of message payload
+ * @param type - [in] the message type
+ *
+ * Message callback registered by kernel client with IPA driver to
+ * free message payload after IPA driver processing is complete
+ *
+ * No return value
+ */
+typedef void (*ipa_msg_free_fn)(void *buff, u32 len, u32 type);
+
+/**
+ * typedef ipa_msg_pull_fn - callback function
+ * @param buff - [in] where to copy message payload
+ * @param len - [in] size of buffer to copy payload into
+ * @param type - [in] the message type
+ *
+ * Message callback registered by kernel client with IPA driver for
+ * IPA driver to pull messages from the kernel client upon demand from
+ * user-space
+ *
+ * Returns how many bytes were copied into the buffer.
+ */
+typedef int (*ipa_msg_pull_fn)(void *buff, u32 len, u32 type);
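+
+/*
+ * Messaging sketch (illustrative; the payload is assumed to come from
+ * kmalloc, so kfree() is the right reclaim). The free callback runs after
+ * IPA driver processing of the message completes:
+ *
+ *	static void my_msg_free(void *buff, u32 len, u32 type)
+ *	{
+ *		kfree(buff);
+ *	}
+ *
+ *	meta.msg_type = msg_type;
+ *	meta.msg_len = payload_len;
+ *	ret = ipa_send_msg(&meta, payload, my_msg_free);
+ */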
+
+/**
+ * enum ipa_voltage_level - IPA Voltage levels
+ */
+enum ipa_voltage_level {
+ IPA_VOLTAGE_UNSPECIFIED,
+ IPA_VOLTAGE_SVS = IPA_VOLTAGE_UNSPECIFIED,
+ IPA_VOLTAGE_NOMINAL,
+ IPA_VOLTAGE_TURBO,
+ IPA_VOLTAGE_MAX,
+};
+
+/**
+ * enum ipa_rm_event - IPA RM events
+ *
+ * Indicate the resource state change
+ */
+enum ipa_rm_event {
+ IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_RELEASED
+};
+
+typedef void (*ipa_rm_notify_cb)(void *user_data,
+ enum ipa_rm_event event,
+ unsigned long data);
+/**
+ * struct ipa_rm_register_params - information needed to
+ * register IPA RM client with IPA RM
+ *
+ * @user_data: IPA RM client provided information
+ * to be passed to notify_cb callback below
+ * @notify_cb: callback which is called by resource
+ *		to notify the IPA RM client about its state
+ *		change. IPA RM client is expected to perform
+ *		non-blocking operations only in notify_cb and
+ *		to release the notification context as soon as
+ *		possible.
+ */
+struct ipa_rm_register_params {
+ void *user_data;
+ ipa_rm_notify_cb notify_cb;
+};
+
+/**
+ * struct ipa_rm_create_params - information needed to initialize
+ * the resource
+ * @name: resource name
+ * @floor_voltage: floor voltage needed for client to operate in maximum
+ * bandwidth.
+ * @reg_params: register parameters; ignored for consumer
+ *		resources, for which NULL should be
+ *		provided
+ * @request_resource: function which should be called to request resource,
+ * NULL should be provided for producer resource
+ * @release_resource: function which should be called to release resource,
+ * NULL should be provided for producer resource
+ *
+ * IPA RM client is expected to perform non blocking operations only
+ * in request_resource and release_resource functions and
+ * release notification context as soon as possible.
+ */
+struct ipa_rm_create_params {
+ enum ipa_rm_resource_name name;
+ enum ipa_voltage_level floor_voltage;
+ struct ipa_rm_register_params reg_params;
+ int (*request_resource)(void);
+ int (*release_resource)(void);
+};
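+
+/*
+ * Producer-resource creation sketch (illustrative; usb_rm_notify and
+ * resume_tx are placeholders). Consumer resources would instead leave
+ * reg_params zeroed and supply request_resource/release_resource:
+ *
+ *	static void usb_rm_notify(void *user_data, enum ipa_rm_event event,
+ *		unsigned long data)
+ *	{
+ *		if (event == IPA_RM_RESOURCE_GRANTED)
+ *			resume_tx(user_data);
+ *	}
+ *
+ *	struct ipa_rm_create_params p;
+ *
+ *	memset(&p, 0, sizeof(p));
+ *	p.name = IPA_RM_RESOURCE_USB_PROD;
+ *	p.floor_voltage = IPA_VOLTAGE_SVS;
+ *	p.reg_params.notify_cb = usb_rm_notify;
+ *	ret = ipa_rm_create_resource(&p);
+ */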
+
+/**
+ * struct ipa_rm_perf_profile - information regarding IPA RM client performance
+ * profile
+ *
+ * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
+ */
+struct ipa_rm_perf_profile {
+ u32 max_supported_bandwidth_mbps;
+};
+
+#define A2_MUX_HDR_NAME_V4_PREF "dmux_hdr_v4_"
+#define A2_MUX_HDR_NAME_V6_PREF "dmux_hdr_v6_"
+
+/**
+ * enum teth_tethering_mode - Tethering mode (Rmnet / MBIM)
+ */
+enum teth_tethering_mode {
+ TETH_TETHERING_MODE_RMNET,
+ TETH_TETHERING_MODE_MBIM,
+ TETH_TETHERING_MODE_MAX,
+};
+
+/**
+ * struct teth_bridge_init_params - Parameters used for in/out USB API
+ * @usb_notify_cb: Callback function which should be used by the caller.
+ * Output parameter.
+ * @private_data: Data for the callback function. Should be used by the
+ * caller. Output parameter.
+ * @client: IPA client type of the pipe to be bridged
+ * @skip_ep_cfg: boolean field that determines if Apps-processor
+ *  should or should not configure this end-point.
+ */
+struct teth_bridge_init_params {
+ ipa_notify_cb usb_notify_cb;
+ void *private_data;
+ enum ipa_client_type client;
+ bool skip_ep_cfg;
+};
+
+/**
+ * struct teth_bridge_connect_params - Parameters used in teth_bridge_connect()
+ * @ipa_usb_pipe_hdl: IPA to USB pipe handle, returned from ipa_connect()
+ * @usb_ipa_pipe_hdl: USB to IPA pipe handle, returned from ipa_connect()
+ * @tethering_mode: Rmnet or MBIM
+ * @client_type: IPA "client" name (IPA_CLIENT_USB#_PROD)
+ */
+struct teth_bridge_connect_params {
+ u32 ipa_usb_pipe_hdl;
+ u32 usb_ipa_pipe_hdl;
+ enum teth_tethering_mode tethering_mode;
+ enum ipa_client_type client_type;
+};
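+
+/*
+ * Bridge bring-up order sketch (illustrative; the two pipe handles are
+ * assumed to come from earlier ipa_connect() calls):
+ *
+ *	struct teth_bridge_init_params init_params;
+ *	struct teth_bridge_connect_params conn;
+ *
+ *	memset(&init_params, 0, sizeof(init_params));
+ *	init_params.client = IPA_CLIENT_USB_PROD;
+ *	teth_bridge_init(&init_params);
+ *	conn.ipa_usb_pipe_hdl = ipa_usb_hdl;
+ *	conn.usb_ipa_pipe_hdl = usb_ipa_hdl;
+ *	conn.tethering_mode = TETH_TETHERING_MODE_RMNET;
+ *	conn.client_type = IPA_CLIENT_USB_PROD;
+ *	teth_bridge_connect(&conn);
+ *
+ * init_params.usb_notify_cb and init_params.private_data are valid after
+ * teth_bridge_init() returns and should be used by the USB driver.
+ */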
+
+/**
+ * struct ipa_tx_data_desc - information needed
+ * to send data packet to HW
+ * @link: link to other data descriptors
+ * @priv: client specific private data
+ * @pyld_buffer: pointer to the data buffer that holds frame
+ * @pyld_len: length of the data packet
+ */
+struct ipa_tx_data_desc {
+ struct list_head link;
+ void *priv;
+ void *pyld_buffer;
+ u16 pyld_len;
+};
+
+/**
+ * struct ipa_rx_data - information needed
+ * to send to wlan driver on receiving data from ipa hw
+ * @skb: skb
+ * @dma_addr: DMA address of this Rx packet
+ */
+struct ipa_rx_data {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * enum ipa_irq_type - IPA Interrupt Type
+ * Used to register handlers for IPA interrupts
+ *
+ * Below enum is a logical mapping and not the actual interrupt bit in HW
+ */
+enum ipa_irq_type {
+ IPA_BAD_SNOC_ACCESS_IRQ,
+ IPA_EOT_COAL_IRQ,
+ IPA_UC_IRQ_0,
+ IPA_UC_IRQ_1,
+ IPA_UC_IRQ_2,
+ IPA_UC_IRQ_3,
+ IPA_UC_IN_Q_NOT_EMPTY_IRQ,
+ IPA_UC_RX_CMD_Q_NOT_FULL_IRQ,
+ IPA_UC_TX_CMD_Q_NOT_FULL_IRQ,
+ IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ,
+ IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ,
+ IPA_RX_ERR_IRQ,
+ IPA_DEAGGR_ERR_IRQ,
+ IPA_TX_ERR_IRQ,
+ IPA_STEP_MODE_IRQ,
+ IPA_PROC_ERR_IRQ,
+ IPA_TX_SUSPEND_IRQ,
+ IPA_TX_HOLB_DROP_IRQ,
+ IPA_BAM_IDLE_IRQ,
+ IPA_BAM_GSI_IDLE_IRQ = IPA_BAM_IDLE_IRQ,
+ IPA_IRQ_MAX
+};
+
+/**
+ * struct ipa_tx_suspend_irq_data - interrupt data for IPA_TX_SUSPEND_IRQ
+ * @endpoints: bitmask of endpoints which caused the IPA_TX_SUSPEND_IRQ
+ *	interrupt
+ */
+struct ipa_tx_suspend_irq_data {
+ u32 endpoints;
+};
+
+
+/**
+ * typedef ipa_irq_handler_t - irq handler/callback type
+ * @param ipa_irq_type - [in] interrupt type
+ * @param private_data - [in, out] the client private data
+ * @param interrupt_data - [out] interrupt information data
+ *
+ * callback registered by ipa_add_interrupt_handler function to
+ * handle a specific interrupt type
+ *
+ * No return value
+ */
+typedef void (*ipa_irq_handler_t)(enum ipa_irq_type interrupt,
+ void *private_data,
+ void *interrupt_data);
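+
+/*
+ * Handler registration sketch for IPA_TX_SUSPEND_IRQ (illustrative; the
+ * handler body only logs the interrupt payload):
+ *
+ *	static void tx_suspend_hdlr(enum ipa_irq_type interrupt,
+ *		void *private_data, void *interrupt_data)
+ *	{
+ *		struct ipa_tx_suspend_irq_data *d = interrupt_data;
+ *
+ *		pr_debug("suspend irq, endpoints bitmask 0x%x\n",
+ *			d->endpoints);
+ *	}
+ *
+ *	ret = ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, tx_suspend_hdlr,
+ *		true, NULL);
+ */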
+
+/**
+ * struct IpaHwBamStats_t - Structure holding the BAM statistics
+ *
+ * @bamFifoFull : Number of times Bam Fifo got full - For In Ch: Good,
+ * For Out Ch: Bad
+ * @bamFifoEmpty : Number of times Bam Fifo got empty - For In Ch: Bad,
+ * For Out Ch: Good
+ * @bamFifoUsageHigh : Number of times Bam fifo usage went above 75% -
+ * For In Ch: Good, For Out Ch: Bad
+ * @bamFifoUsageLow : Number of times Bam fifo usage went below 25% -
+ * For In Ch: Bad, For Out Ch: Good
+ */
+struct IpaHwBamStats_t {
+ u32 bamFifoFull;
+ u32 bamFifoEmpty;
+ u32 bamFifoUsageHigh;
+ u32 bamFifoUsageLow;
+ u32 bamUtilCount;
+} __packed;
+
+/**
+ * struct IpaHwRingStats_t - Structure holding the Ring statistics
+ *
+ * @ringFull : Number of times Transfer Ring got full - For In Ch: Good,
+ * For Out Ch: Bad
+ * @ringEmpty : Number of times Transfer Ring got empty - For In Ch: Bad,
+ * For Out Ch: Good
+ * @ringUsageHigh : Number of times Transfer Ring usage went above 75% -
+ * For In Ch: Good, For Out Ch: Bad
+ * @ringUsageLow : Number of times Transfer Ring usage went below 25% -
+ * For In Ch: Bad, For Out Ch: Good
+ */
+struct IpaHwRingStats_t {
+ u32 ringFull;
+ u32 ringEmpty;
+ u32 ringUsageHigh;
+ u32 ringUsageLow;
+ u32 RingUtilCount;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDIRxInfoData_t - Structure holding the WDI Rx channel
+ * structures
+ *
+ * @max_outstanding_pkts : Number of outstanding packets in Rx Ring
+ * @num_pkts_processed : Number of packets processed - cumulative
+ * @rx_ring_rp_value : Read pointer last advertised to the WLAN FW
+ * @rx_ind_ring_stats : Ring info
+ * @bam_stats : BAM info
+ * @num_bam_int_handled : Number of Bam Interrupts handled by FW
+ * @num_db : Number of times the doorbell was rung
+ * @num_unexpected_db : Number of unexpected doorbells
+ * @num_pkts_in_dis_uninit_state : number of completions we
+ * received in disabled or uninitialized state
+ * @num_ic_inj_vdev_change : Number of times the Imm Cmd is
+ * injected due to vdev_id change
+ * @num_ic_inj_fw_desc_change : Number of times the Imm Cmd is
+ * injected due to fw_desc change
+ */
+struct IpaHwStatsWDIRxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_ic_inj_vdev_change;
+ u32 num_ic_inj_fw_desc_change;
+ u32 reserved1;
+ u32 reserved2;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDITxInfoData_t - Structure holding the WDI Tx channel
+ * structures
+ *
+ * @num_pkts_processed : Number of packets processed - cumulative
+ * @copy_engine_doorbell_value : latest value of doorbell written to copy engine
+ * @num_db_fired : Number of DB from uC FW to Copy engine
+ * @tx_comp_ring_stats : ring info
+ * @bam_stats : BAM info
+ * @num_db : Number of times the doorbell was rung
+ * @num_unexpected_db : Number of unexpected doorbells
+ * @num_bam_int_handled : Number of Bam Interrupts handled by FW
+ * @num_bam_int_in_non_running_state : Number of Bam interrupts while not in
+ * Running state
+ * @num_qmb_int_handled : Number of QMB interrupts handled
+ */
+struct IpaHwStatsWDITxInfoData_t {
+ u32 num_pkts_processed;
+ u32 copy_engine_doorbell_value;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_running_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDIInfoData_t - Structure holding the WDI channel structures
+ *
+ * @rx_ch_stats : RX stats
+ * @tx_ch_stats : TX stats
+ */
+struct IpaHwStatsWDIInfoData_t {
+ struct IpaHwStatsWDIRxInfoData_t rx_ch_stats;
+ struct IpaHwStatsWDITxInfoData_t tx_ch_stats;
+} __packed;
+
+
+/**
+ * struct ipa_wdi_ul_params - WDI_RX configuration
+ * @rdy_ring_base_pa: physical address of the base of the Rx ring (containing
+ * Rx buffers)
+ * @rdy_ring_size: size of the Rx ring in bytes
+ * @rdy_ring_rp_pa: physical address of the location through which IPA uc is
+ * reading (WDI-1.0)
+ * @rdy_comp_ring_base_pa: physical address of the base of the Rx completion
+ * ring (WDI-2.0)
+ * @rdy_comp_ring_wp_pa: physical address of the location through which IPA
+ * uc is writing (WDI-2.0)
+ * @rdy_comp_ring_size: size of the Rx completion ring in bytes
+ * @rdy_ring_rp_va: virtual address of rdy_ring_rp_pa (WDI-2.0)
+ * @rdy_comp_ring_wp_va: virtual address of rdy_comp_ring_wp_pa (WDI-2.0)
+ */
+struct ipa_wdi_ul_params {
+ phys_addr_t rdy_ring_base_pa;
+ u32 rdy_ring_size;
+ phys_addr_t rdy_ring_rp_pa;
+ phys_addr_t rdy_comp_ring_base_pa;
+ phys_addr_t rdy_comp_ring_wp_pa;
+ u32 rdy_comp_ring_size;
+ u32 *rdy_ring_rp_va;
+ u32 *rdy_comp_ring_wp_va;
+};
+
+/**
+ * struct ipa_wdi_ul_params_smmu - WDI_RX configuration (with WLAN SMMU)
+ * @rdy_ring: SG table describing the Rx ring (containing Rx buffers)
+ * @rdy_ring_size: size of the Rx ring in bytes
+ * @rdy_ring_rp_pa: physical address of the location through which IPA uc is
+ * expected to communicate about the Read pointer into the Rx Ring
+ * @rdy_comp_ring: SG table describing the Rx completion ring (WDI-2.0)
+ * @rdy_comp_ring_wp_pa: physical address of the location through which IPA
+ * uc is writing (WDI-2.0)
+ * @rdy_comp_ring_size: size of the Rx completion ring in bytes
+ * @rdy_ring_rp_va: virtual address of rdy_ring_rp_pa (WDI-2.0)
+ * @rdy_comp_ring_wp_va: virtual address of rdy_comp_ring_wp_pa (WDI-2.0)
+ */
+struct ipa_wdi_ul_params_smmu {
+ struct sg_table rdy_ring;
+ u32 rdy_ring_size;
+ phys_addr_t rdy_ring_rp_pa;
+ struct sg_table rdy_comp_ring;
+ phys_addr_t rdy_comp_ring_wp_pa;
+ u32 rdy_comp_ring_size;
+ u32 *rdy_ring_rp_va;
+ u32 *rdy_comp_ring_wp_va;
+};
+
+/**
+ * struct ipa_wdi_dl_params - WDI_TX configuration
+ * @comp_ring_base_pa: physical address of the base of the Tx completion ring
+ * @comp_ring_size: size of the Tx completion ring in bytes
+ * @ce_ring_base_pa: physical address of the base of the Copy Engine Source
+ * Ring
+ * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to
+ * write into to trigger the copy engine
+ * @ce_ring_size: Copy Engine Ring size in bytes
+ * @num_tx_buffers: Number of pkt buffers allocated
+ */
+struct ipa_wdi_dl_params {
+ phys_addr_t comp_ring_base_pa;
+ u32 comp_ring_size;
+ phys_addr_t ce_ring_base_pa;
+ phys_addr_t ce_door_bell_pa;
+ u32 ce_ring_size;
+ u32 num_tx_buffers;
+};
+
+/**
+ * struct ipa_wdi_dl_params_smmu - WDI_TX configuration (with WLAN SMMU)
+ * @comp_ring: SG table describing the Tx completion ring
+ * @comp_ring_size: size of the Tx completion ring in bytes
+ * @ce_ring: SG table describing the Copy Engine Source Ring
+ * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to
+ * write into to trigger the copy engine
+ * @ce_ring_size: Copy Engine Ring size in bytes
+ * @num_tx_buffers: Number of pkt buffers allocated
+ */
+struct ipa_wdi_dl_params_smmu {
+ struct sg_table comp_ring;
+ u32 comp_ring_size;
+ struct sg_table ce_ring;
+ phys_addr_t ce_door_bell_pa;
+ u32 ce_ring_size;
+ u32 num_tx_buffers;
+};
+
+/**
+ * struct ipa_wdi_in_params - information provided by WDI client
+ * @sys: IPA EP configuration info
+ * @ul: WDI_RX configuration info
+ * @dl: WDI_TX configuration info
+ * @ul_smmu: WDI_RX configuration info when WLAN uses SMMU
+ * @dl_smmu: WDI_TX configuration info when WLAN uses SMMU
+ * @smmu_enabled: true if WLAN uses SMMU
+ */
+struct ipa_wdi_in_params {
+ struct ipa_sys_connect_params sys;
+ union {
+ struct ipa_wdi_ul_params ul;
+ struct ipa_wdi_dl_params dl;
+ struct ipa_wdi_ul_params_smmu ul_smmu;
+ struct ipa_wdi_dl_params_smmu dl_smmu;
+ } u;
+ bool smmu_enabled;
+};
+
+/**
+ * struct ipa_wdi_out_params - information provided to WDI client
+ * @uc_door_bell_pa: physical address of IPA uc doorbell
+ * @clnt_hdl: opaque handle assigned to client
+ */
+struct ipa_wdi_out_params {
+ phys_addr_t uc_door_bell_pa;
+ u32 clnt_hdl;
+};
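+
+/*
+ * WDI TX (DL) connect sketch for the non-SMMU case (illustrative; the ring
+ * addresses and sizes are placeholders supplied by the WLAN driver):
+ *
+ *	struct ipa_wdi_in_params in;
+ *	struct ipa_wdi_out_params out;
+ *
+ *	memset(&in, 0, sizeof(in));
+ *	in.sys.client = IPA_CLIENT_WLAN1_CONS;
+ *	in.smmu_enabled = false;
+ *	in.u.dl.comp_ring_base_pa = comp_ring_pa;
+ *	in.u.dl.comp_ring_size = comp_ring_sz;
+ *	in.u.dl.ce_ring_base_pa = ce_ring_pa;
+ *	in.u.dl.ce_door_bell_pa = ce_db_pa;
+ *	in.u.dl.ce_ring_size = ce_ring_sz;
+ *	in.u.dl.num_tx_buffers = num_bufs;
+ *	ret = ipa_connect_wdi_pipe(&in, &out);
+ *
+ * On success, out.uc_door_bell_pa is handed to the WLAN FW and out.clnt_hdl
+ * is used for the enable/disable/suspend/resume calls declared below.
+ */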
+
+/**
+ * struct ipa_wdi_db_params - information provided to retrieve
+ * physical address of uC doorbell
+ * @client: type of "client" (IPA_CLIENT_WLAN#_PROD/CONS)
+ * @uc_door_bell_pa: physical address of IPA uc doorbell
+ */
+struct ipa_wdi_db_params {
+ enum ipa_client_type client;
+ phys_addr_t uc_door_bell_pa;
+};
+
+/**
+ * struct ipa_wdi_uc_ready_params - uC ready CB parameters
+ * @is_uC_ready: uC loaded or not
+ * @priv : callback cookie
+ * @notify: callback
+ */
+typedef void (*ipa_uc_ready_cb)(void *priv);
+struct ipa_wdi_uc_ready_params {
+ bool is_uC_ready;
+ void *priv;
+ ipa_uc_ready_cb notify;
+};
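+
+/*
+ * uC readiness sketch (illustrative; my_uc_ready_cb and my_ctx are
+ * placeholders): register the callback, then check whether the uC was
+ * already loaded at registration time:
+ *
+ *	struct ipa_wdi_uc_ready_params p;
+ *
+ *	p.priv = my_ctx;
+ *	p.notify = my_uc_ready_cb;
+ *	ipa_uc_reg_rdyCB(&p);
+ *	if (p.is_uC_ready)
+ *		proceed immediately, no callback will fire;
+ *	else
+ *		wait for my_uc_ready_cb(my_ctx) to be invoked later.
+ */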
+
+/**
+ * struct ipa_wdi_buffer_info - address info of a WLAN allocated buffer
+ * @pa: physical address of the buffer
+ * @iova: IOVA of the buffer as embedded inside the WDI descriptors
+ * @size: size in bytes of the buffer
+ * @result: result of map or unmap operations (out param)
+ *
+ * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa
+ */
+struct ipa_wdi_buffer_info {
+ phys_addr_t pa;
+ unsigned long iova;
+ size_t size;
+ int result;
+};
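+
+/*
+ * Mapping sketch (illustrative; buf_pa/buf_iova/buf_sz are placeholders):
+ * each entry asks IPA to create an iova->pa mapping in the IPA SMMU, with
+ * the per-buffer status reported back in .result:
+ *
+ *	struct ipa_wdi_buffer_info info[1];
+ *
+ *	info[0].pa = buf_pa;
+ *	info[0].iova = buf_iova;
+ *	info[0].size = buf_sz;
+ *	ret = ipa_create_wdi_mapping(1, info);
+ *	...
+ *	ret = ipa_release_wdi_mapping(1, info);
+ */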
+
+/**
+ * struct ipa_gsi_ep_config - IPA GSI endpoint configurations
+ *
+ * @ipa_ep_num: IPA EP pipe number
+ * @ipa_gsi_chan_num: GSI channel number
+ * @ipa_if_tlv: number of IPA_IF TLV
+ * @ipa_if_aos: number of IPA_IF AOS
+ * @ee: Execution environment
+ */
+struct ipa_gsi_ep_config {
+ int ipa_ep_num;
+ int ipa_gsi_chan_num;
+ int ipa_if_tlv;
+ int ipa_if_aos;
+ int ee;
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+/*
+ * Connect / Disconnect
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+ u32 *clnt_hdl);
+int ipa_disconnect(u32 clnt_hdl);
+
+/*
+ * Resume / Suspend
+ */
+int ipa_reset_endpoint(u32 clnt_hdl);
+
+/*
+ * Remove ep delay
+ */
+int ipa_clear_endpoint_delay(u32 clnt_hdl);
+
+/*
+ * Disable ep
+ */
+int ipa_disable_endpoint(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_deaggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask
+ *ipa_ep_cfg);
+
+int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
+ const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+/*
+ * Header removal / addition
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa_commit_hdr(void);
+
+int ipa_reset_hdr(void);
+
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa_put_hdr(u32 hdr_hdl);
+
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Header Processing Context
+ */
+int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+/*
+ * Routing
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa_commit_rt(enum ipa_ip_type ip);
+
+int ipa_reset_rt(enum ipa_ip_type ip);
+
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa_put_rt_tbl(u32 rt_tbl_hdl);
+
+int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
+
+int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
+
+/*
+ * Filtering
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
+
+int ipa_commit_flt(enum ipa_ip_type ip);
+
+int ipa_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Messaging
+ */
+int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback);
+int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
+int ipa_deregister_pull_msg(struct ipa_msg_meta *meta);
+
+/*
+ * Interface
+ */
+int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx);
+int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext);
+int ipa_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * Data path
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata);
+
+/*
+ * To transfer multiple data packets
+ * While passing the data descriptor list, the anchor node
+ * should be of type struct ipa_tx_data_desc not list_head
+ */
+int ipa_tx_dp_mul(enum ipa_client_type dst,
+ struct ipa_tx_data_desc *data_desc);
+
+void ipa_free_skb(struct ipa_rx_data *rx_in);
+int ipa_rx_poll(u32 clnt_hdl, int budget);
+void ipa_recycle_wan_skb(struct sk_buff *skb);
+
+/*
+ * System pipes
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa_teardown_sys_pipe(u32 clnt_hdl);
+
+int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out);
+int ipa_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa_enable_wdi_pipe(u32 clnt_hdl);
+int ipa_disable_wdi_pipe(u32 clnt_hdl);
+int ipa_resume_wdi_pipe(u32 clnt_hdl);
+int ipa_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+u16 ipa_get_smem_restr_bytes(void);
+/*
+ * To retrieve doorbell physical address of
+ * wlan pipes
+ */
+int ipa_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * To register uC ready callback if uC not ready
+ * and also check uC readiness
+ * if uC not ready only, register callback
+ */
+int ipa_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa_uc_dereg_rdyCB(void);
+
+int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+
+/*
+ * Resource manager
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params);
+
+int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_perf_profile *profile);
+
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+ enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+ unsigned long msecs);
+
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_request_resource(
+ enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_release_resource(
+ enum ipa_rm_resource_name resource_name);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int teth_bridge_init(struct teth_bridge_init_params *params);
+
+int teth_bridge_disconnect(enum ipa_client_type client);
+
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
+/*
+ * Tethering client info
+ */
+void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink);
+
+enum ipacm_client_enum ipa_get_client(int pipe_idx);
+
+bool ipa_get_client_uplink(int pipe_idx);
+
+/*
+ * IPADMA
+ */
+int ipa_dma_init(void);
+
+int ipa_dma_enable(void);
+
+int ipa_dma_disable(void);
+
+int ipa_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+ void (*user_cb)(void *user1), void *user_param);
+
+int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+
+void ipa_dma_destroy(void);
+
+/*
+ * mux id
+ */
+int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
+
+/*
+ * interrupts
+ */
+int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data);
+
+int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt);
+
+int ipa_restore_suspend_handler(void);
+
+/*
+ * Miscellaneous
+ */
+void ipa_bam_reg_dump(void);
+
+int ipa_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa_is_ready(void);
+
+void ipa_proxy_clk_vote(void);
+void ipa_proxy_clk_unvote(void);
+
+enum ipa_hw_type ipa_get_hw_type(void);
+
+bool ipa_is_client_handle_valid(u32 clnt_hdl);
+
+enum ipa_client_type ipa_get_client_mapping(int pipe_idx);
+
+enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx);
+
+bool ipa_get_modem_cfg_emb_pipe_flt(void);
+
+enum ipa_transport_type ipa_get_transport_type(void);
+
+struct device *ipa_get_dma_dev(void);
+struct iommu_domain *ipa_get_smmu_domain(void);
+
+int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count);
+
+struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx);
+
+int ipa_stop_gsi_channel(u32 clnt_hdl);
+
+typedef void (*ipa_ready_cb)(void *user_data);
+
+/**
+ * ipa_register_ipa_ready_cb() - register a callback to be invoked
+ * when IPA core driver initialization is complete.
+ *
+ * @ipa_ready_cb: CB to be triggered.
+ * @user_data: Data to be sent to the originator of the CB.
+ *
+ * Note: This function is expected to be utilized when ipa_is_ready
+ * function returns false.
+ * An IPA client may also use this function directly rather than
+ * calling ipa_is_ready beforehand, as if this API returns -EEXIST,
+ * this means IPA initialization is complete (and no callback will
+ * be triggered).
+ * When the callback is triggered, the client MUST perform its
+ * operations in a different context.
+ *
+ * The function will return 0 on success, -ENOMEM on memory issues and
+ * -EEXIST if IPA initialization is already complete.
+ */
+int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
+ void *user_data);
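+
+/*
+ * Registration sketch honoring the -EEXIST contract documented above
+ * (illustrative; my_ipa_ready, my_init_work and my_ctx are placeholders):
+ *
+ *	static void my_ipa_ready(void *user_data)
+ *	{
+ *		schedule_work(&my_init_work);	defer, never init here
+ *	}
+ *
+ *	ret = ipa_register_ipa_ready_cb(my_ipa_ready, my_ctx);
+ *	if (ret == -EEXIST)
+ *		IPA is already up; continue synchronously;
+ *	else if (ret)
+ *		registration failed.
+ */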
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+/*
+ * Connect / Disconnect
+ */
+static inline int ipa_connect(const struct ipa_connect_params *in,
+ struct ipa_sps_params *sps, u32 *clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_disconnect(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+/*
+ * Resume / Suspend
+ */
+static inline int ipa_reset_endpoint(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+/*
+ * Remove ep delay
+ */
+static inline int ipa_clear_endpoint_delay(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+/*
+ * Disable ep
+ */
+static inline int ipa_disable_endpoint(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+/*
+ * Configuration
+ */
+static inline int ipa_cfg_ep(u32 clnt_hdl,
+ const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_nat(u32 clnt_hdl,
+ const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_hdr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_mode(u32 clnt_hdl,
+ const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_aggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_deaggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_route(u32 clnt_hdl,
+ const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_holb(u32 clnt_hdl,
+ const struct ipa_ep_cfg_holb *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_cfg(u32 clnt_hdl,
+ const struct ipa_ep_cfg_cfg *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_metadata_mask(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_ctrl(u32 clnt_hdl,
+ const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+ return -EPERM;
+}
+
+/*
+ * Header removal / addition
+ */
+static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+ return -EPERM;
+}
+
+static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+ return -EPERM;
+}
+
+static inline int ipa_commit_hdr(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_reset_hdr(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+ return -EPERM;
+}
+
+static inline int ipa_put_hdr(u32 hdr_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+ return -EPERM;
+}
+
+/*
+ * Header Processing Context
+ */
+static inline int ipa_add_hdr_proc_ctx(
+ struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+ return -EPERM;
+}
+
+static inline int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+ return -EPERM;
+}
+/*
+ * Routing
+ */
+static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+ return -EPERM;
+}
+
+static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+ return -EPERM;
+}
+
+static inline int ipa_commit_rt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+static inline int ipa_reset_rt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+static inline int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+ return -EPERM;
+}
+
+static inline int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules)
+{
+ return -EPERM;
+}
+
+/*
+ * Filtering
+ */
+static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+ return -EPERM;
+}
+
+static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules)
+{
+ return -EPERM;
+}
+
+static inline int ipa_commit_flt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+static inline int ipa_reset_flt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+/*
+ * NAT
+ */
+static inline int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+ return -EPERM;
+}
+
+/*
+ * Messaging
+ */
+static inline int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback)
+{
+ return -EPERM;
+}
+
+static inline int ipa_register_pull_msg(struct ipa_msg_meta *meta,
+ ipa_msg_pull_fn callback)
+{
+ return -EPERM;
+}
+
+static inline int ipa_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+ return -EPERM;
+}
+
+/*
+ * Interface
+ */
+static inline int ipa_register_intf(const char *name,
+ const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx)
+{
+ return -EPERM;
+}
+
+static inline int ipa_register_intf_ext(const char *name,
+ const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext)
+{
+ return -EPERM;
+}
+
+static inline int ipa_deregister_intf(const char *name)
+{
+ return -EPERM;
+}
+
+/*
+ * Aggregation
+ */
+static inline int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+ return -EPERM;
+}
+
+static inline int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+ return -EPERM;
+}
+
+static inline int ipa_set_single_ndp_per_mbim(bool enable)
+{
+ return -EPERM;
+}
+
+/*
+ * Data path
+ */
+static inline int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata)
+{
+ return -EPERM;
+}
+
+/*
+ * To transfer multiple data packets
+ */
+static inline int ipa_tx_dp_mul(
+ enum ipa_client_type dst,
+ struct ipa_tx_data_desc *data_desc)
+{
+ return -EPERM;
+}
+
+static inline void ipa_free_skb(struct ipa_rx_data *rx_in)
+{
+}
+
+static inline int ipa_rx_poll(u32 clnt_hdl, int budget)
+{
+ return -EPERM;
+}
+
+static inline void ipa_recycle_wan_skb(struct sk_buff *skb)
+{
+}
+
+/*
+ * System pipes
+ */
+static inline u16 ipa_get_smem_restr_bytes(void)
+{
+	return 0;
+}
+
+static inline int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in,
+ u32 *clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out)
+{
+ return -EPERM;
+}
+
+static inline int ipa_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_enable_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_disable_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_resume_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_suspend_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_wdi_get_dbpa(
+ struct ipa_wdi_db_params *out)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_reg_rdyCB(
+ struct ipa_wdi_uc_ready_params *param)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_dereg_rdyCB(void)
+{
+ return -EPERM;
+}
+
+
+/*
+ * Resource manager
+ */
+static inline int ipa_rm_create_resource(
+ struct ipa_rm_create_params *create_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_delete_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_set_perf_profile(
+ enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_perf_profile *profile)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency(
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency_sync(
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_delete_dependency(
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_request_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_release_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_notify_completion(enum ipa_rm_event event,
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_init(
+ enum ipa_rm_resource_name resource_name,
+ unsigned long msecs)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_destroy(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_request_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_release_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+static inline int teth_bridge_init(struct teth_bridge_init_params *params)
+{
+ return -EPERM;
+}
+
+static inline int teth_bridge_disconnect(enum ipa_client_type client)
+{
+ return -EPERM;
+}
+
+static inline int teth_bridge_connect(struct teth_bridge_connect_params
+ *connect_params)
+{
+ return -EPERM;
+}
+
+/*
+ * Tethering client info
+ */
+static inline void ipa_set_client(int index, enum ipacm_client_enum client,
+ bool uplink)
+{
+}
+
+static inline enum ipacm_client_enum ipa_get_client(int pipe_idx)
+{
+ return -EPERM;
+}
+
+static inline bool ipa_get_client_uplink(int pipe_idx)
+{
+	return false;
+}
+
+/*
+ * IPADMA
+ */
+static inline int ipa_dma_init(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_dma_enable(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_dma_disable(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+	void (*user_cb)(void *user1), void *user_param)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+ return -EPERM;
+}
+
+static inline void ipa_dma_destroy(void)
+{
+}
+
+/*
+ * mux id
+ */
+static inline int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+ return -EPERM;
+}
+
+/*
+ * interrupts
+ */
+static inline int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data)
+{
+ return -EPERM;
+}
+
+static inline int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+ return -EPERM;
+}
+
+static inline int ipa_restore_suspend_handler(void)
+{
+ return -EPERM;
+}
+
+/*
+ * Miscellaneous
+ */
+static inline void ipa_bam_reg_dump(void)
+{
+}
+
+static inline int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+ return -EPERM;
+}
+
+static inline int ipa_get_ep_mapping(enum ipa_client_type client)
+{
+ return -EPERM;
+}
+
+static inline bool ipa_is_ready(void)
+{
+ return false;
+}
+
+static inline void ipa_proxy_clk_vote(void)
+{
+}
+
+static inline void ipa_proxy_clk_unvote(void)
+{
+}
+
+static inline enum ipa_hw_type ipa_get_hw_type(void)
+{
+ return IPA_HW_None;
+}
+
+static inline bool ipa_is_client_handle_valid(u32 clnt_hdl)
+{
+ return false;
+}
+
+static inline enum ipa_client_type ipa_get_client_mapping(int pipe_idx)
+{
+ return -EINVAL;
+}
+
+static inline enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(
+ int pipe_idx)
+{
+ return -EFAULT;
+}
+
+static inline bool ipa_get_modem_cfg_emb_pipe_flt(void)
+{
+ return false;
+}
+
+static inline enum ipa_transport_type ipa_get_transport_type(void)
+{
+ return -EFAULT;
+}
+
+static inline struct device *ipa_get_dma_dev(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_domain *ipa_get_smmu_domain(void)
+{
+ return NULL;
+}
+
+static inline int ipa_create_wdi_mapping(u32 num_buffers,
+ struct ipa_wdi_buffer_info *info)
+{
+ return -EINVAL;
+}
+
+static inline int ipa_release_wdi_mapping(u32 num_buffers,
+ struct ipa_wdi_buffer_info *info)
+{
+ return -EINVAL;
+}
+
+static inline int ipa_disable_apps_wan_cons_deaggr(void)
+{
+ return -EINVAL;
+}
+
+static inline struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx)
+{
+ return NULL;
+}
+
+static inline int ipa_stop_gsi_channel(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_register_ipa_ready_cb(
+ void (*ipa_ready_cb)(void *user_data),
+ void *user_data)
+{
+ return -EPERM;
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* _IPA_H_ */
diff --git a/include/linux/ipa_mhi.h b/include/linux/ipa_mhi.h
new file mode 100644
index 0000000..4d3b974
--- /dev/null
+++ b/include/linux/ipa_mhi.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IPA_MHI_H_
+#define IPA_MHI_H_
+
+#include <linux/ipa.h>
+#include <linux/types.h>
+
+/**
+ * enum ipa_mhi_event_type - event type for mhi callback
+ *
+ * @IPA_MHI_EVENT_READY: IPA MHI is ready and the IPA uC is loaded. After
+ * receiving this event, the MHI client is expected to call the
+ * ipa_mhi_start() API
+ * @IPA_MHI_EVENT_DATA_AVAILABLE: downlink data available on MHI channel
+ */
+enum ipa_mhi_event_type {
+ IPA_MHI_EVENT_READY,
+ IPA_MHI_EVENT_DATA_AVAILABLE,
+ IPA_MHI_EVENT_MAX,
+};
+
+typedef void (*mhi_client_cb)(void *priv, enum ipa_mhi_event_type event,
+ unsigned long data);
+
+/**
+ * struct ipa_mhi_msi_info - parameters for MSI (Message Signaled Interrupts)
+ * @addr_low: MSI lower base physical address
+ * @addr_hi: MSI higher base physical address
+ * @data: Data Pattern to use when generating the MSI
+ * @mask: Mask indicating number of messages assigned by the host to device
+ *
+ * msi value is written according to this formula:
+ * ((data & ~mask) | (mmio.msiVec & mask))
+ */
+struct ipa_mhi_msi_info {
+ u32 addr_low;
+ u32 addr_hi;
+ u32 data;
+ u32 mask;
+};
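+
+/*
+ * Illustrative sketch (not part of the API): the MSI value is computed
+ * from @data, @mask and the MMIO MSI vector per the formula above. A
+ * hypothetical helper could look like:
+ *
+ *	static u32 ipa_mhi_compute_msi(const struct ipa_mhi_msi_info *msi,
+ *				       u32 mmio_msi_vec)
+ *	{
+ *		return (msi->data & ~msi->mask) | (mmio_msi_vec & msi->mask);
+ *	}
+ */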
+
+/**
+ * struct ipa_mhi_init_params - parameters for IPA MHI initialization API
+ *
+ * @msi: MSI (Message Signaled Interrupts) parameters
+ * @mmio_addr: MHI MMIO physical address
+ * @first_ch_idx: First channel ID for hardware accelerated channels.
+ * @first_er_idx: First event ring ID for hardware accelerated channels.
+ * @assert_bit40: true if bit 40 should be asserted in order to access
+ * host space. If the PCIe iATU is configured, there is no need
+ * to assert bit 40
+ * @notify: client callback
+ * @priv: client private data to be provided in client callback
+ * @test_mode: flag to indicate if IPA MHI is in unit test mode
+ */
+struct ipa_mhi_init_params {
+ struct ipa_mhi_msi_info msi;
+ u32 mmio_addr;
+ u32 first_ch_idx;
+ u32 first_er_idx;
+ bool assert_bit40;
+ mhi_client_cb notify;
+ void *priv;
+ bool test_mode;
+};
+
+/**
+ * struct ipa_mhi_start_params - parameters for IPA MHI start API
+ *
+ * @host_ctrl_addr: Base address of MHI control data structures
+ * @host_data_addr: Base address of MHI data buffers
+ * @channel_context_array_addr: channel context array address in host
+ * address space
+ * @event_context_array_addr: event context array address in host
+ * address space
+ */
+struct ipa_mhi_start_params {
+ u32 host_ctrl_addr;
+ u32 host_data_addr;
+ u64 channel_context_array_addr;
+ u64 event_context_array_addr;
+};
+
+/**
+ * struct ipa_mhi_connect_params - parameters for IPA MHI channel connect API
+ *
+ * @sys: IPA EP configuration info
+ * @channel_id: MHI channel id
+ */
+struct ipa_mhi_connect_params {
+ struct ipa_sys_connect_params sys;
+ u8 channel_id;
+};
+
+/* bit #40 in the address should be asserted for MHI transfers over PCIe */
+#define IPA_MHI_HOST_ADDR(addr) ((addr) | BIT_ULL(40))
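+
+/*
+ * Example (illustrative, assuming no iATU translation; params is a
+ * hypothetical struct ipa_mhi_start_params pointer): a host DDR address
+ * handed to IPA gets bit 40 asserted, e.g.:
+ *
+ *	u64 ch_ctx = IPA_MHI_HOST_ADDR(params->channel_context_array_addr);
+ */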
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+int ipa_mhi_init(struct ipa_mhi_init_params *params);
+
+int ipa_mhi_start(struct ipa_mhi_start_params *params);
+
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl);
+
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl);
+
+int ipa_mhi_suspend(bool force);
+
+int ipa_mhi_resume(void);
+
+void ipa_mhi_destroy(void);
+
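+/*
+ * Illustrative call flow for an MHI client (a sketch under assumed
+ * usage, not a normative sequence; init_params and start_params are
+ * placeholder locals and error handling is omitted):
+ *
+ *	static void mhi_cb(void *priv, enum ipa_mhi_event_type event,
+ *		unsigned long data)
+ *	{
+ *		if (event == IPA_MHI_EVENT_READY)
+ *			ipa_mhi_start(&start_params);
+ *	}
+ *
+ *	init_params.notify = mhi_cb;
+ *	ipa_mhi_init(&init_params);
+ *
+ * After start, pipes are connected with ipa_mhi_connect_pipe() and the
+ * link is power-managed with ipa_mhi_suspend()/ipa_mhi_resume().
+ */
+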
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+static inline int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_start(struct ipa_mhi_start_params *params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in,
+ u32 *clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_suspend(bool force)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_resume(void)
+{
+ return -EPERM;
+}
+
+static inline void ipa_mhi_destroy(void)
+{
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* IPA_MHI_H_ */
diff --git a/include/linux/ipa_odu_bridge.h b/include/linux/ipa_odu_bridge.h
new file mode 100644
index 0000000..5d30a97
--- /dev/null
+++ b/include/linux/ipa_odu_bridge.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_ODO_BRIDGE_H_
+#define _IPA_ODO_BRIDGE_H_
+
+#include <linux/ipa.h>
+
+/**
+ * struct odu_bridge_params - parameters for odu bridge initialization API
+ *
+ * @netdev_name: network interface name
+ * @priv: private data that will be supplied to client's callback
+ * @tx_dp_notify: callback for handling an SKB. The following events are
+ * supported:
+ * IPA_WRITE_DONE: called after the client calls odu_bridge_tx_dp().
+ * The client is expected to free the skb.
+ * IPA_RECEIVE: called to deliver an skb to APPS.
+ * The client is expected to deliver the skb to the network stack.
+ * @send_dl_skb: callback for sending an skb in the downlink direction to
+ * the adapter. The client is expected to free the skb.
+ * @device_ethaddr: device Ethernet address in network order.
+ * @ipa_desc_size: IPA Sys Pipe Desc Size
+ */
+struct odu_bridge_params {
+ const char *netdev_name;
+ void *priv;
+ ipa_notify_cb tx_dp_notify;
+ int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+ u8 device_ethaddr[ETH_ALEN];
+ u32 ipa_desc_size;
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+int odu_bridge_init(struct odu_bridge_params *params);
+
+int odu_bridge_connect(void);
+
+int odu_bridge_disconnect(void);
+
+int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata);
+
+int odu_bridge_cleanup(void);
+
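+/*
+ * Illustrative lifecycle sketch (assumed usage; "odu0" is a hypothetical
+ * interface name, error handling omitted):
+ *
+ *	struct odu_bridge_params p = { .netdev_name = "odu0", ... };
+ *
+ *	odu_bridge_init(&p);
+ *	odu_bridge_connect();
+ *	odu_bridge_tx_dp(skb, NULL);	(per uplink packet, NULL metadata)
+ *	odu_bridge_disconnect();
+ *	odu_bridge_cleanup();
+ */
+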
+#else
+
+static inline int odu_bridge_init(struct odu_bridge_params *params)
+{
+ return -EPERM;
+}
+
+static inline int odu_bridge_disconnect(void)
+{
+ return -EPERM;
+}
+
+static inline int odu_bridge_connect(void)
+{
+ return -EPERM;
+}
+
+static inline int odu_bridge_tx_dp(struct sk_buff *skb,
+ struct ipa_tx_meta *metadata)
+{
+ return -EPERM;
+}
+
+static inline int odu_bridge_cleanup(void)
+{
+ return -EPERM;
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* _IPA_ODO_BRIDGE_H_ */
diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h
new file mode 100644
index 0000000..0277e87
--- /dev/null
+++ b/include/linux/ipa_uc_offload.h
@@ -0,0 +1,259 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_H_
+#define _IPA_UC_OFFLOAD_H_
+
+#include <linux/ipa.h>
+
+/**
+ * enum ipa_uc_offload_proto
+ * Protocol type: either WDI or Neutrino
+ *
+ * @IPA_UC_WDI: wdi Protocol
+ * @IPA_UC_NTN: Neutrino Protocol
+ */
+enum ipa_uc_offload_proto {
+ IPA_UC_INVALID = 0,
+ IPA_UC_WDI = 1,
+ IPA_UC_NTN = 2,
+ IPA_UC_MAX_PROT_SIZE
+};
+
+/**
+ * struct ipa_hdr_info - Header to install on IPA HW
+ *
+ * @hdr: header to install on IPA HW
+ * @hdr_len: length of header
+ * @dst_mac_addr_offset: destination mac address offset
+ * @hdr_type: layer two header type
+ */
+struct ipa_hdr_info {
+ u8 *hdr;
+ u8 hdr_len;
+ u8 dst_mac_addr_offset;
+ enum ipa_hdr_l2_type hdr_type;
+};
+
+/**
+ * struct ipa_uc_offload_intf_params - parameters for uC offload
+ * interface registration
+ *
+ * @netdev_name: network interface name
+ * @notify: callback for exception/embedded packets
+ * @priv: callback cookie
+ * @hdr_info: header information
+ * @is_meta_data_valid: indicates whether meta_data is valid
+ * @meta_data: meta data, if any
+ * @meta_data_mask: meta data mask
+ * @proto: uC offload protocol type
+ * @alt_dst_pipe: alternate routing output pipe
+ */
+struct ipa_uc_offload_intf_params {
+ const char *netdev_name;
+ ipa_notify_cb notify;
+ void *priv;
+ struct ipa_hdr_info hdr_info[IPA_IP_MAX];
+ u8 is_meta_data_valid;
+ u32 meta_data;
+ u32 meta_data_mask;
+ enum ipa_uc_offload_proto proto;
+ enum ipa_client_type alt_dst_pipe;
+};
+
+/**
+ * struct ipa_ntn_setup_info - NTN TX/Rx configuration
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @ring_base_pa: physical address of the base of the Tx/Rx ring
+ * @ntn_ring_size: size of the Tx/Rx ring (in terms of elements)
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @num_buffers: Rx/Tx buffer pool size (in terms of elements)
+ * @data_buff_size: size of the each data buffer allocated in DDR
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's
+ * tail pointer
+ */
+struct ipa_ntn_setup_info {
+ enum ipa_client_type client;
+ phys_addr_t ring_base_pa;
+ u32 ntn_ring_size;
+
+ phys_addr_t buff_pool_base_pa;
+ u32 num_buffers;
+ u32 data_buff_size;
+
+ phys_addr_t ntn_reg_base_ptr_pa;
+};
+
+/**
+ * struct ipa_uc_offload_out_params - out parameters for uC offload
+ *
+ * @clnt_hndl: handle that the client needs to pass during
+ * further operations
+ */
+struct ipa_uc_offload_out_params {
+ u32 clnt_hndl;
+};
+
+/**
+ * struct ipa_ntn_conn_in_params - NTN TX/Rx connect parameters
+ * @ul: parameters to connect UL pipe(from Neutrino to IPA)
+ * @dl: parameters to connect DL pipe(from IPA to Neutrino)
+ */
+struct ipa_ntn_conn_in_params {
+ struct ipa_ntn_setup_info ul;
+ struct ipa_ntn_setup_info dl;
+};
+
+/**
+ * struct ipa_ntn_conn_out_params - information provided
+ * to uC offload client
+ * @ul_uc_db_pa: physical address of IPA uC doorbell for UL
+ * @dl_uc_db_pa: physical address of IPA uC doorbell for DL
+ */
+struct ipa_ntn_conn_out_params {
+ phys_addr_t ul_uc_db_pa;
+ phys_addr_t dl_uc_db_pa;
+};
+
+/**
+ * struct ipa_uc_offload_conn_in_params - information provided by
+ * uC offload client
+ * @clnt_hndl: handle returned during interface registration
+ * @proto: Protocol to use for offload data path
+ * @ntn: uC RX/Tx configuration info
+ */
+struct ipa_uc_offload_conn_in_params {
+ u32 clnt_hndl;
+ union {
+ struct ipa_ntn_conn_in_params ntn;
+ } u;
+};
+
+/**
+ * struct ipa_uc_offload_conn_out_params - information provided
+ * to the uC offload client
+ * @ntn: NTN connect output parameters
+ */
+struct ipa_uc_offload_conn_out_params {
+ union {
+ struct ipa_ntn_conn_out_params ntn;
+ } u;
+};
+
+/**
+ * struct ipa_perf_profile - To set BandWidth profile
+ *
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
+ */
+struct ipa_perf_profile {
+ enum ipa_client_type client;
+ u32 max_supported_bw_mbps;
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+/**
+ * ipa_uc_offload_reg_intf - Client should call this function to
+ * init uC offload data path
+ *
+ * @in: [in] interface registration parameters
+ * @out: [out] output parameters to the client
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *in,
+ struct ipa_uc_offload_out_params *out);
+
+/**
+ * ipa_uc_offload_cleanup - Client Driver should call this
+ * function before unload and after disconnect
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_cleanup(u32 clnt_hdl);
+
+/**
+ * ipa_uc_offload_conn_pipes - Client should call this
+ * function to connect uC pipe for offload data path
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *in,
+ struct ipa_uc_offload_conn_out_params *out);
+
+/**
+ * ipa_uc_offload_disconn_pipes() - Client should call this
+ * function to disconnect uC pipe to disable offload data path
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl);
+
+/**
+ * ipa_set_perf_profile() - Client should call this function to
+ * set IPA clock Band Width based on data rates
+ * @profile: [in] BandWidth profile to use
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_perf_profile(struct ipa_perf_profile *profile);
+
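+/*
+ * Illustrative NTN offload flow (a sketch under assumed usage with
+ * placeholder initializers; profile is a placeholder local and error
+ * handling is omitted):
+ *
+ *	struct ipa_uc_offload_intf_params in = { .proto = IPA_UC_NTN, ... };
+ *	struct ipa_uc_offload_out_params out;
+ *	struct ipa_uc_offload_conn_in_params cin = { ... };
+ *	struct ipa_uc_offload_conn_out_params cout;
+ *
+ *	ipa_uc_offload_reg_intf(&in, &out);
+ *	cin.clnt_hndl = out.clnt_hndl;
+ *	ipa_uc_offload_conn_pipes(&cin, &cout);
+ *	ipa_set_perf_profile(&profile);
+ *	...
+ *	ipa_uc_offload_disconn_pipes(out.clnt_hndl);
+ *	ipa_uc_offload_cleanup(out.clnt_hndl);
+ */
+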
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+static inline int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *in,
+ struct ipa_uc_offload_out_params *out)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_offload_conn_pipes(
+ struct ipa_uc_offload_conn_in_params *in,
+ struct ipa_uc_offload_conn_out_params *out)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+ return -EPERM;
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* _IPA_UC_OFFLOAD_H_ */
diff --git a/include/linux/ipa_usb.h b/include/linux/ipa_usb.h
new file mode 100644
index 0000000..0fe0e36
--- /dev/null
+++ b/include/linux/ipa_usb.h
@@ -0,0 +1,330 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_USB_H_
+#define _IPA_USB_H_
+
+enum ipa_usb_teth_prot {
+ IPA_USB_RNDIS = 0,
+ IPA_USB_ECM = 1,
+ IPA_USB_RMNET = 2,
+ IPA_USB_MBIM = 3,
+ IPA_USB_DIAG = 4,
+ IPA_USB_MAX_TETH_PROT_SIZE
+};
+
+/**
+ * ipa_usb_teth_params - parameters for RNDIS/ECM initialization API
+ *
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ */
+struct ipa_usb_teth_params {
+ u8 host_ethaddr[ETH_ALEN];
+ u8 device_ethaddr[ETH_ALEN];
+};
+
+enum ipa_usb_notify_event {
+ IPA_USB_DEVICE_READY,
+ IPA_USB_REMOTE_WAKEUP,
+ IPA_USB_SUSPEND_COMPLETED
+};
+
+enum ipa_usb_max_usb_packet_size {
+ IPA_USB_HIGH_SPEED_512B = 512,
+ IPA_USB_SUPER_SPEED_1024B = 1024
+};
+
+/**
+ * ipa_usb_teth_prot_params - parameters for connecting RNDIS
+ *
+ * @max_xfer_size_bytes_to_dev: max size of UL packets in bytes
+ * @max_packet_number_to_dev: max number of UL aggregated packets
+ * @max_xfer_size_bytes_to_host: max size of DL packets in bytes
+ *
+ */
+struct ipa_usb_teth_prot_params {
+ u32 max_xfer_size_bytes_to_dev;
+ u32 max_packet_number_to_dev;
+ u32 max_xfer_size_bytes_to_host;
+};
+
+/**
+ * ipa_usb_xdci_connect_params - parameters required to start IN, OUT
+ * channels, and connect RNDIS/ECM/teth_bridge
+ *
+ * @max_pkt_size: high speed or full speed
+ * @ipa_to_usb_xferrscidx: Transfer Resource Index (XferRscIdx) for IN channel.
+ * The hardware-assigned transfer resource index for the
+ * transfer, which was returned in response to the
+ * Start Transfer command. This field is used for
+ * "Update Transfer" command.
+ * Should be 0 <= ipa_to_usb_xferrscidx <= 127.
+ * @ipa_to_usb_xferrscidx_valid: true if xferRscIdx should be updated for IN
+ * channel
+ * @usb_to_ipa_xferrscidx: Transfer Resource Index (XferRscIdx) for OUT channel
+ * Should be 0 <= usb_to_ipa_xferrscidx <= 127.
+ * @usb_to_ipa_xferrscidx_valid: true if xferRscIdx should be updated for OUT
+ * channel
+ * @teth_prot: tethering protocol
+ * @teth_prot_params: parameters for connecting the tethering protocol.
+ * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
+ */
+struct ipa_usb_xdci_connect_params {
+ enum ipa_usb_max_usb_packet_size max_pkt_size;
+ u8 ipa_to_usb_xferrscidx;
+ bool ipa_to_usb_xferrscidx_valid;
+ u8 usb_to_ipa_xferrscidx;
+ bool usb_to_ipa_xferrscidx_valid;
+ enum ipa_usb_teth_prot teth_prot;
+ struct ipa_usb_teth_prot_params teth_prot_params;
+ u32 max_supported_bandwidth_mbps;
+};
+
+/**
+ * ipa_usb_xdci_chan_scratch - xDCI protocol SW config area of
+ * channel scratch
+ *
+ * @last_trb_addr_iova: Address (iova LSB - based on alignment restrictions) of
+ * last TRB in queue. Used to identify roll over case
+ * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
+ * configuration). Must be aligned to max USB Packet Size.
+ * Should be 1 <= const_buffer_size <= 31.
+ * @depcmd_low_addr: Used to generate "Update Transfer" command
+ * @depcmd_hi_addr: Used to generate "Update Transfer" command.
+ */
+struct ipa_usb_xdci_chan_scratch {
+ u16 last_trb_addr_iova;
+ u8 const_buffer_size;
+ u32 depcmd_low_addr;
+ u8 depcmd_hi_addr;
+};
+
+/**
+ * ipa_usb_xdci_chan_params - xDCI channel related properties
+ *
+ * @client: type of "client"
+ * @ipa_ep_cfg: IPA EP configuration
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @teth_prot: tethering protocol for which the channel is created
+ * @gevntcount_low_addr: GEVNCOUNT low address for event scratch
+ * @gevntcount_hi_addr: GEVNCOUNT high address for event scratch
+ * @dir: channel direction
+ * @xfer_ring_len: length of transfer ring in bytes (must be integral
+ * multiple of transfer element size - 16B for xDCI)
+ * @xfer_ring_base_addr: physical base address of transfer ring. Address must be
+ * aligned to xfer_ring_len rounded to power of two
+ * @xfer_scratch: parameters for xDCI channel scratch
+ * @xfer_ring_base_addr_iova: IO virtual address mapped to xfer_ring_base_addr
+ * @data_buff_base_len: length of data buffer allocated by USB driver
+ * @data_buff_base_addr: physical base address for the data buffer (where TRBs
+ * points)
+ * @data_buff_base_addr_iova: IO virtual address mapped to data_buff_base_addr
+ *
+ */
+struct ipa_usb_xdci_chan_params {
+ /* IPA EP params */
+ enum ipa_client_type client;
+ struct ipa_ep_cfg ipa_ep_cfg;
+ bool keep_ipa_awake;
+ enum ipa_usb_teth_prot teth_prot;
+ /* event ring params */
+ u32 gevntcount_low_addr;
+ u8 gevntcount_hi_addr;
+ /* transfer ring params */
+ enum gsi_chan_dir dir;
+ u16 xfer_ring_len;
+ u64 xfer_ring_base_addr;
+ struct ipa_usb_xdci_chan_scratch xfer_scratch;
+ u64 xfer_ring_base_addr_iova;
+ u32 data_buff_base_len;
+ u64 data_buff_base_addr;
+ u64 data_buff_base_addr_iova;
+};
+
+/**
+ * ipa_req_chan_out_params - out parameters for channel request
+ *
+ * @clnt_hdl: opaque client handle assigned by IPA to client
+ * @db_reg_phs_addr_lsb: Physical address of doorbell register where the 32
+ * LSBs of the doorbell value should be written
+ * @db_reg_phs_addr_msb: Physical address of doorbell register where the 32
+ * MSBs of the doorbell value should be written
+ *
+ */
+struct ipa_req_chan_out_params {
+ u32 clnt_hdl;
+ u32 db_reg_phs_addr_lsb;
+ u32 db_reg_phs_addr_msb;
+};
+
+#ifdef CONFIG_IPA3
+
+/**
+ * ipa_usb_init_teth_prot - Peripheral should call this function to initialize
+ * RNDIS/ECM/teth_bridge/DPL, prior to calling ipa_usb_xdci_connect()
+ *
+ * @teth_prot: tethering protocol type
+ * @teth_params: pointer to tethering protocol parameters.
+ * Should be struct ipa_usb_teth_params for RNDIS/ECM,
+ * or NULL for teth_bridge
+ * @ipa_usb_notify_cb: will be called to notify USB driver on certain events
+ * @user_data: cookie used for ipa_usb_notify_cb
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+ struct ipa_usb_teth_params *teth_params,
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+ void *),
+ void *user_data);
+
+/**
+ * ipa_usb_xdci_connect - Peripheral should call this function to start IN &
+ * OUT xDCI channels, and connect RNDIS/ECM/MBIM/RMNET.
+ * For DPL, only starts IN channel.
+ *
+ * @ul_chan_params: parameters for allocating the UL xDCI channel, containing
+ * required info on event and transfer rings, and IPA EP
+ * configuration
+ * @ul_out_params: [out] opaque client handle assigned by IPA to client & DB
+ * registers physical address for UL channel
+ * @dl_chan_params: parameters for allocating the DL xDCI channel, containing
+ * required info on event and transfer rings, and IPA EP
+ * configuration
+ * @dl_out_params: [out] opaque client handle assigned by IPA to client & DB
+ * registers physical address for DL channel
+ * @connect_params: handles and scratch params of the required channels,
+ * tethering protocol and the tethering protocol parameters.
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
+ struct ipa_usb_xdci_chan_params *dl_chan_params,
+ struct ipa_req_chan_out_params *ul_out_params,
+ struct ipa_req_chan_out_params *dl_out_params,
+ struct ipa_usb_xdci_connect_params *connect_params);
+
+/**
+ * ipa_usb_xdci_disconnect - Peripheral should call this function to stop
+ * IN & OUT xDCI channels
+ * For DPL, only stops IN channel.
+ *
+ * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for OUT channel
+ * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for IN channel
+ * @teth_prot: tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_deinit_teth_prot - Peripheral should call this function to deinit
+ * RNDIS/ECM/MBIM/RMNET
+ *
+ * @teth_prot: tethering protocol
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_xdci_suspend - Peripheral should call this function to suspend
+ * IN & OUT or DPL xDCI channels
+ *
+ * @ul_clnt_hdl: client handle previously obtained from
+ * ipa_usb_xdci_connect() for OUT channel
+ * @dl_clnt_hdl: client handle previously obtained from
+ * ipa_usb_xdci_connect() for IN channel
+ * @teth_prot: tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ * Note: for DPL, ul_clnt_hdl will be ignored as irrelevant
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_xdci_resume - Peripheral should call this function to resume
+ * IN & OUT or DPL xDCI channels
+ *
+ * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for OUT channel
+ * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for IN channel
+ * @teth_prot: tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ * Note: for DPL, ul_clnt_hdl will be ignored as irrelevant
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot);
+
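+/*
+ * Illustrative xDCI flow for RNDIS (a sketch under assumed usage;
+ * teth_params, notify_cb, user_data, ul_chan, dl_chan and conn are
+ * placeholder locals and error handling is omitted):
+ *
+ *	struct ipa_req_chan_out_params ul_out, dl_out;
+ *
+ *	ipa_usb_init_teth_prot(IPA_USB_RNDIS, &teth_params, notify_cb,
+ *		user_data);
+ *	ipa_usb_xdci_connect(&ul_chan, &dl_chan, &ul_out, &dl_out, &conn);
+ *	...
+ *	ipa_usb_xdci_suspend(ul_out.clnt_hdl, dl_out.clnt_hdl,
+ *		IPA_USB_RNDIS);
+ *	ipa_usb_xdci_resume(ul_out.clnt_hdl, dl_out.clnt_hdl,
+ *		IPA_USB_RNDIS);
+ *	...
+ *	ipa_usb_xdci_disconnect(ul_out.clnt_hdl, dl_out.clnt_hdl,
+ *		IPA_USB_RNDIS);
+ *	ipa_usb_deinit_teth_prot(IPA_USB_RNDIS);
+ */
+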
+#else /* CONFIG_IPA3 */
+
+static inline int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+ struct ipa_usb_teth_params *teth_params,
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+ void *),
+ void *user_data)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_connect(
+ struct ipa_usb_xdci_chan_params *ul_chan_params,
+ struct ipa_usb_xdci_chan_params *dl_chan_params,
+ struct ipa_req_chan_out_params *ul_out_params,
+ struct ipa_req_chan_out_params *dl_out_params,
+ struct ipa_usb_xdci_connect_params *connect_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+#endif /* CONFIG_IPA3 */
+
+#endif /* _IPA_USB_H_ */
diff --git a/include/linux/rndis_ipa.h b/include/linux/rndis_ipa.h
new file mode 100644
index 0000000..05d0a66
--- /dev/null
+++ b/include/linux/rndis_ipa.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RNDIS_IPA_H_
+#define _RNDIS_IPA_H_
+
+#include <linux/ipa.h>
+
+/*
+ * @priv: private data given upon ipa_connect
+ * @evt: event enum, should be IPA_WRITE_DONE
+ * @data: for the Tx path, the data field is the sent socket buffer.
+ */
+typedef void (*ipa_callback)(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+
+/*
+ * struct ipa_usb_init_params - parameters for driver initialization API
+ *
+ * @device_ready_notify: callback supplied by USB core driver
+ * This callback shall be called by the Netdev once the device
+ * is ready to receive data from the tethered PC.
+ * @ipa_rx_notify: The network driver will set this callback (out parameter).
+ * This callback shall be supplied to ipa_connect upon pipe
+ * connection (USB->IPA); once the IPA driver receives data packets
+ * from the USB pipe destined for Apps, this callback will be called.
+ * @ipa_tx_notify: The network driver will set this callback (out parameter).
+ * This callback shall be supplied to ipa_connect upon pipe
+ * connection (IPA->USB); once the IPA driver sends packets destined
+ * for USB, the IPA BAM will notify of Tx-complete.
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ * @private: The network driver will set this pointer (out parameter).
+ * This pointer will hold the network device for later interaction
+ * between the USB driver and the network driver.
+ * @skip_ep_cfg: boolean field that determines if Apps-processor
+ * should or should not configure this end-point.
+ */
+struct ipa_usb_init_params {
+ void (*device_ready_notify)(void);
+ ipa_callback ipa_rx_notify;
+ ipa_callback ipa_tx_notify;
+ u8 host_ethaddr[ETH_ALEN];
+ u8 device_ethaddr[ETH_ALEN];
+ void *private;
+ bool skip_ep_cfg;
+};
+
+#ifdef CONFIG_RNDIS_IPA
+
+int rndis_ipa_init(struct ipa_usb_init_params *params);
+
+int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+ u32 ipa_to_usb_hdl,
+ u32 max_xfer_size_bytes_to_dev,
+ u32 max_packet_number_to_dev,
+ u32 max_xfer_size_bytes_to_host,
+ void *private);
+
+int rndis_ipa_pipe_disconnect_notify(void *private);
+
+void rndis_ipa_cleanup(void *private);
+
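+/*
+ * Illustrative sketch of the expected USB-driver interaction (assumed
+ * usage; the handle and size arguments are placeholders obtained from
+ * pipe connect, error handling omitted):
+ *
+ *	struct ipa_usb_init_params p = { ... };
+ *
+ *	rndis_ipa_init(&p);
+ *	rndis_ipa_pipe_connect_notify(usb_to_ipa_hdl, ipa_to_usb_hdl,
+ *		max_ul_bytes, max_ul_pkts, max_dl_bytes, p.private);
+ *	...
+ *	rndis_ipa_pipe_disconnect_notify(p.private);
+ *	rndis_ipa_cleanup(p.private);
+ */
+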
+#else /* CONFIG_RNDIS_IPA */
+
+static inline int rndis_ipa_init(struct ipa_usb_init_params *params)
+{
+ return -ENOMEM;
+}
+
+static inline int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+ u32 ipa_to_usb_hdl,
+ u32 max_xfer_size_bytes_to_dev,
+ u32 max_packet_number_to_dev,
+ u32 max_xfer_size_bytes_to_host,
+ void *private)
+{
+ return -ENOMEM;
+}
+
+static inline int rndis_ipa_pipe_disconnect_notify(void *private)
+{
+ return -ENOMEM;
+}
+
+static inline void rndis_ipa_cleanup(void *private)
+{
+}
+
+#endif /* CONFIG_RNDIS_IPA */
+
+#endif /* _RNDIS_IPA_H_ */
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
new file mode 100644
index 0000000..b2c40a4
--- /dev/null
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -0,0 +1,1623 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This header file defines the types and structures that were defined in
+ * the IPA IDL. It contains the constant values defined, enums, structures,
+ * messages, and service message IDs (in that order). Structures that were
+ * defined in the IDL as messages contain mandatory elements, optional
+ * elements, a combination of mandatory and optional elements (mandatory
+ * always come before optionals in the structure), or nothing (null message)
+ *
+ * An optional element in a message is preceded by a uint8_t value that must be
+ * set to true if the element is going to be included. When decoding a received
+ * message, the uint8_t values will be set to true or false by the decode
+ * routine, and should be checked before accessing the values that they
+ * correspond to.
+ *
+ * Variable sized arrays are defined as static sized arrays with an unsigned
+ * integer (32 bit) preceding it that must be set to the number of elements
+ * in the array that are valid. For example:
+ *
+ * uint32_t test_opaque_len;
+ * uint8_t test_opaque[16];
+ *
+ * If only 4 elements are added to test_opaque[] then test_opaque_len must be
+ * set to 4 before sending the message. When decoding, the _len value is set
+ * by the decode routine and should be checked so that the correct number of
+ * elements in the array will be accessed.
+ */
+#ifndef IPA_QMI_SERVICE_V01_H
+#define IPA_QMI_SERVICE_V01_H
+
+#define QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01 2
+#define QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01 2
+#define QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01 2
+#define QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01 2
+#define QMI_IPA_MAX_FILTERS_V01 64
+#define QMI_IPA_MAX_PIPES_V01 20
+#define QMI_IPA_MAX_APN_V01 8
+
+#define IPA_INT_MAX ((int)(~0U>>1))
+#define IPA_INT_MIN (-IPA_INT_MAX - 1)
+
+/* IPA definition as msm_qmi_interface.h */
+
+enum ipa_qmi_result_type_v01 {
+ /* To force a 32 bit signed enum. Do not change or use*/
+ IPA_QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+ IPA_QMI_RESULT_SUCCESS_V01 = 0,
+ IPA_QMI_RESULT_FAILURE_V01 = 1,
+ IPA_QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
+enum ipa_qmi_error_type_v01 {
+ /* To force a 32 bit signed enum. Do not change or use*/
+ IPA_QMI_ERROR_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+ IPA_QMI_ERR_NONE_V01 = 0x0000,
+ IPA_QMI_ERR_MALFORMED_MSG_V01 = 0x0001,
+ IPA_QMI_ERR_NO_MEMORY_V01 = 0x0002,
+ IPA_QMI_ERR_INTERNAL_V01 = 0x0003,
+ IPA_QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 = 0x0005,
+ IPA_QMI_ERR_INVALID_ID_V01 = 0x0029,
+ IPA_QMI_ERR_ENCODING_V01 = 0x003A,
+ IPA_QMI_ERR_INCOMPATIBLE_STATE_V01 = 0x005A,
+ IPA_QMI_ERR_NOT_SUPPORTED_V01 = 0x005E,
+ IPA_QMI_ERROR_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
+struct ipa_qmi_response_type_v01 {
+ enum ipa_qmi_result_type_v01 result;
+ enum ipa_qmi_error_type_v01 error;
+};
+
+enum ipa_platform_type_enum_v01 {
+ IPA_PLATFORM_TYPE_ENUM_MIN_ENUM_VAL_V01 =
+ -2147483647, /* To force a 32 bit signed enum. Do not change or use */
+ QMI_IPA_PLATFORM_TYPE_INVALID_V01 = 0,
+ /* Invalid platform identifier */
+ QMI_IPA_PLATFORM_TYPE_TN_V01 = 1,
+ /* Platform identifier - Data card device */
+ QMI_IPA_PLATFORM_TYPE_LE_V01 = 2,
+ /* Platform identifier - Data router device */
+ QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01 = 3,
+ /* Platform identifier - MSM device with Android HLOS */
+ QMI_IPA_PLATFORM_TYPE_MSM_WINDOWS_V01 = 4,
+ /* Platform identifier - MSM device with Windows HLOS */
+ QMI_IPA_PLATFORM_TYPE_MSM_QNX_V01 = 5,
+ /* Platform identifier - MSM device with QNX HLOS */
+ IPA_PLATFORM_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+ /* To force a 32 bit signed enum. Do not change or use */
+};
+
+struct ipa_hdr_tbl_info_type_v01 {
+ uint32_t modem_offset_start;
+ /* Offset from the start of IPA Shared memory from which
+ * modem driver may insert header table entries.
+ */
+ uint32_t modem_offset_end;
+ /* Offset from the start of IPA shared mem beyond which modem
+ * driver shall not insert header table entries. The space
+ * available for the modem driver shall include the
+ * modem_offset_start and modem_offset_end.
+ */
+}; /* Type */
+
+struct ipa_route_tbl_info_type_v01 {
+ uint32_t route_tbl_start_addr;
+ /* Identifies the start of the routing table. Denotes the offset
+ * from the start of the IPA Shared Mem
+ */
+
+ uint32_t num_indices;
+ /* Number of indices (starting from 0) that is being allocated to
+ * the modem. The number indicated here is also included in the
+ * allocation. The value of num_indices shall not exceed 31
+ * (5 bits used to specify the routing table index), unless there
+ * is a change in the hardware.
+ */
+}; /* Type */
+
+struct ipa_modem_mem_info_type_v01 {
+
+ uint32_t block_start_addr;
+ /* Identifies the start of the memory block allocated for the
+ * modem. Denotes the offset from the start of the IPA Shared Mem
+ */
+
+ uint32_t size;
+ /* Size of the block allocated for the modem driver */
+}; /* Type */
+
+struct ipa_hdr_proc_ctx_tbl_info_type_v01 {
+
+ uint32_t modem_offset_start;
+ /* Offset from the start of IPA shared memory from which the modem
+ * driver may insert header processing context table entries.
+ */
+
+ uint32_t modem_offset_end;
+ /* Offset from the start of IPA shared memory beyond which the modem
+ * driver may not insert header proc table entries. The space
+ * available for the modem driver includes modem_offset_start and
+ * modem_offset_end.
+ */
+}; /* Type */
+
+struct ipa_zip_tbl_info_type_v01 {
+
+ uint32_t modem_offset_start;
+ /* Offset from the start of IPA shared memory from which the modem
+ * driver may insert compression/decompression command entries.
+ */
+
+ uint32_t modem_offset_end;
+ /* Offset from the start of IPA shared memory beyond which the modem
+ * driver may not insert compression/decompression command entries.
+ * The space available for the modem driver includes
+ * modem_offset_start and modem_offset_end.
+ */
+}; /* Type */
+
+/**
+ * Request Message; Requests the modem IPA driver
+ * to perform initialization
+ */
+struct ipa_init_modem_driver_req_msg_v01 {
+
+ /* Optional */
+ /* Platform info */
+ uint8_t platform_type_valid;
+ /* Must be set to true if platform_type is being passed */
+ enum ipa_platform_type_enum_v01 platform_type;
+ /* Provides information about the platform (e.g. TN/MN/LE/MSM, etc.) */
+
+ /* Optional */
+ /* Header table info */
+ uint8_t hdr_tbl_info_valid;
+ /* Must be set to true if hdr_tbl_info is being passed */
+ struct ipa_hdr_tbl_info_type_v01 hdr_tbl_info;
+ /* Provides information about the header table */
+
+ /* Optional */
+ /* IPV4 Routing table info */
+ uint8_t v4_route_tbl_info_valid;
+ /* Must be set to true if v4_route_tbl_info is being passed */
+ struct ipa_route_tbl_info_type_v01 v4_route_tbl_info;
+ /* Provides information about the IPV4 routing table */
+
+ /* Optional */
+ /* IPV6 Routing table info */
+ uint8_t v6_route_tbl_info_valid;
+ /* Must be set to true if v6_route_tbl_info is being passed */
+ struct ipa_route_tbl_info_type_v01 v6_route_tbl_info;
+ /* Provides information about the IPV6 routing table */
+
+ /* Optional */
+ /* IPV4 Filter table start address */
+ uint8_t v4_filter_tbl_start_addr_valid;
+ /* Must be set to true if v4_filter_tbl_start_addr is being passed */
+ uint32_t v4_filter_tbl_start_addr;
+ /* Provides information about the starting address of IPV4 filter
+ * table in IPAv2 or non-hashable IPv4 filter table in IPAv3.
+ * Denotes the offset from the start of the IPA Shared Mem
+ */
+
+ /* Optional */
+ /* IPV6 Filter table start address */
+ uint8_t v6_filter_tbl_start_addr_valid;
+ /* Must be set to true if v6_filter_tbl_start_addr is being passed */
+ uint32_t v6_filter_tbl_start_addr;
+ /* Provides information about the starting address of IPV6 filter
+ * table in IPAv2 or non-hashable IPv6 filter table in IPAv3.
+ * Denotes the offset from the start of the IPA Shared Mem
+ */
+
+ /* Optional */
+ /* Modem memory block */
+ uint8_t modem_mem_info_valid;
+ /* Must be set to true if modem_mem_info is being passed */
+ struct ipa_modem_mem_info_type_v01 modem_mem_info;
+ /* Provides information about the start address and the size of
+ * the memory block that is being allocated to the modem driver.
+ * Denotes the physical address
+ */
+
+ /* Optional */
+ /* Destination end point for control commands from modem */
+ uint8_t ctrl_comm_dest_end_pt_valid;
+ /* Must be set to true if ctrl_comm_dest_end_pt is being passed */
+ uint32_t ctrl_comm_dest_end_pt;
+ /* Provides information about the destination end point on the
+ * application processor to which the modem driver can send
+ * control commands. The value of this parameter cannot exceed
+ * 19 since IPA only supports 20 end points.
+ */
+
+ /* Optional */
+ /* Modem Bootup Information */
+ uint8_t is_ssr_bootup_valid;
+ /* Must be set to true if is_ssr_bootup is being passed */
+ uint8_t is_ssr_bootup;
+ /* Specifies whether the modem is booting up after a modem only
+ * sub-system restart or not. This will let the modem driver
+ * know that it doesn't have to reinitialize some of the HW
+ * blocks because IPA has not been reset since the previous
+ * initialization.
+ */
+
+ /* Optional */
+ /* Header Processing Context Table Information */
+ uint8_t hdr_proc_ctx_tbl_info_valid;
+ /* Must be set to true if hdr_proc_ctx_tbl_info is being passed */
+ struct ipa_hdr_proc_ctx_tbl_info_type_v01 hdr_proc_ctx_tbl_info;
+ /* Provides information about the header processing context table.
+ */
+
+ /* Optional */
+ /* Compression Decompression Table Information */
+ uint8_t zip_tbl_info_valid;
+ /* Must be set to true if zip_tbl_info is being passed */
+ struct ipa_zip_tbl_info_type_v01 zip_tbl_info;
+ /* Provides information about the zip table.
+ */
+
+ /* Optional */
+ /* IPv4 Hashable Routing Table Information */
+ /** Must be set to true if v4_hash_route_tbl_info is being passed */
+ uint8_t v4_hash_route_tbl_info_valid;
+ struct ipa_route_tbl_info_type_v01 v4_hash_route_tbl_info;
+
+ /* Optional */
+ /* IPv6 Hashable Routing Table Information */
+ /** Must be set to true if v6_hash_route_tbl_info is being passed */
+ uint8_t v6_hash_route_tbl_info_valid;
+ struct ipa_route_tbl_info_type_v01 v6_hash_route_tbl_info;
+
+ /*
+ * Optional
+ * IPv4 Hashable Filter Table Start Address
+ * Must be set to true if v4_hash_filter_tbl_start_addr
+ * is being passed
+ */
+ uint8_t v4_hash_filter_tbl_start_addr_valid;
+ uint32_t v4_hash_filter_tbl_start_addr;
+ /* Identifies the starting address of the IPv4 hashable filter
+ * table in IPAv3 onwards. Denotes the offset from the start of
+ * the IPA shared memory.
+ */
+
+ /* Optional
+ * IPv6 Hashable Filter Table Start Address
+ * Must be set to true if v6_hash_filter_tbl_start_addr
+ * is being passed
+ */
+ uint8_t v6_hash_filter_tbl_start_addr_valid;
+ uint32_t v6_hash_filter_tbl_start_addr;
+ /* Identifies the starting address of the IPv6 hashable filter
+ * table in IPAv3 onwards. Denotes the offset from the start of
+ * the IPA shared memory.
+ */
+}; /* Message */
+
+/* Response Message; Requests the modem IPA driver about initialization */
+struct ipa_init_modem_driver_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type.*/
+
+ /* Optional */
+ /* Destination end point for control commands from master driver */
+ uint8_t ctrl_comm_dest_end_pt_valid;
+ /* Must be set to true if ctrl_comm_dest_ep is being passed */
+ uint32_t ctrl_comm_dest_end_pt;
+ /* Provides information about the destination end point on the
+ * modem processor to which the master driver can send control
+ * commands. The value of this parameter cannot exceed 19 since
+ * IPA only supports 20 end points. This field is looked at only
+ * if the result in TLV RESULT_CODE is QMI_RESULT_SUCCESS
+ */
+
+ /* Optional */
+ /* Default end point */
+ uint8_t default_end_pt_valid;
+ /* Must be set to true if default_end_pt is being passed */
+ uint32_t default_end_pt;
+ /* Provides information about the default end point. The master
+ * driver may or may not set the register in the hardware with
+ * this value. The value of this parameter cannot exceed 19
+ * since IPA only supports 20 end points. This field is looked
+ * at only if the result in TLV RESULT_CODE is QMI_RESULT_SUCCESS
+ */
+
+ /* Optional */
+ /* Modem Driver Initialization Pending */
+ uint8_t modem_driver_init_pending_valid;
+ /* Must be set to true if modem_driver_init_pending is being passed */
+ uint8_t modem_driver_init_pending;
+ /*
+ * Identifies if second level message handshake is needed
+ * between drivers to indicate when IPA HWP loading is completed.
+ * If this is set by modem driver, AP driver will need to wait
+ * for a INIT_MODEM_DRIVER_CMPLT message before communicating with
+ * IPA HWP.
+ */
+}; /* Message */
+
+/*
+ * Request Message; Request from Modem IPA driver to indicate
+ * modem driver init completion
+ */
+struct ipa_init_modem_driver_cmplt_req_msg_v01 {
+ /* Mandatory */
+ /* Modem Driver init complete status; */
+ uint8_t status;
+ /*
+ * Specifies whether the modem driver initialization is complete
+ * including the micro controller image loading.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Request from Modem IPA driver to indicate
+ * modem driver init completion
+ */
+struct ipa_init_modem_driver_cmplt_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /**< Standard response type.*/
+}; /* Message */
+
+/* Request Message; This is the message that is exchanged between the
+ * control point and the service in order to register for indications.
+ */
+struct ipa_indication_reg_req_msg_v01 {
+ /* Optional */
+ /* Master driver initialization completion */
+ uint8_t master_driver_init_complete_valid;
+ /* Must be set to true if master_driver_init_complete is being passed */
+ uint8_t master_driver_init_complete;
+ /* If set to TRUE, this field indicates that the client is
+ * interested in getting indications about the completion
+ * of the initialization sequence of the master driver.
+ * Setting this field in the request message makes sense
+ * only when the QMI_IPA_INDICATION_REGISTER_REQ is being
+ * originated from the modem driver
+ */
+
+ /* Optional */
+ /* Data Usage Quota Reached */
+ uint8_t data_usage_quota_reached_valid;
+ /* Must be set to true if data_usage_quota_reached is being passed */
+ uint8_t data_usage_quota_reached;
+ /* If set to TRUE, this field indicates that the client wants to
+ * receive indications about reaching the data usage quota that was
+ * previously set via QMI_IPA_SET_DATA_USAGE_QUOTA. Setting this field
+ * in the request message makes sense only when the
+ * QMI_IPA_INDICATION_REGISTER_REQ is being originated from the Master
+ * driver
+ */
+}; /* Message */
+
+
+/* Response Message; This is the message that is exchanged between the
+ * control point and the service in order to register for indications.
+ */
+struct ipa_indication_reg_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /**< Standard response type.*/
+}; /* Message */
+
+
+/* Indication Message; Indication sent to the Modem IPA driver from
+ * master IPA driver about initialization being complete.
+ */
+struct ipa_master_driver_init_complt_ind_msg_v01 {
+ /* Mandatory */
+ /* Master driver initialization completion status */
+ struct ipa_qmi_response_type_v01 master_driver_init_status;
+ /* Indicates the status of initialization. If everything went
+ * as expected, this field is set to SUCCESS. ERROR is set
+ * otherwise. Extended error info may be used to convey
+ * additional information about the error
+ */
+}; /* Message */
+
+struct ipa_ipfltr_range_eq_16_type_v01 {
+ uint8_t offset;
+ /* Specifies the offset from the IHL (Internet Header length) */
+
+ uint16_t range_low;
+ /* Specifies the lower bound of the range */
+
+ uint16_t range_high;
+ /* Specifies the upper bound of the range */
+}; /* Type */
+
+struct ipa_ipfltr_mask_eq_32_type_v01 {
+ uint8_t offset;
+ /* Specifies the offset either from IHL or from the start of
+ * the IP packet. This depends on the equation that this structure
+ * is used in.
+ */
+
+ uint32_t mask;
+ /* Specifies the mask that has to be used in the comparison.
+ * The field is ANDed with the mask and compared against the value.
+ */
+
+ uint32_t value;
+ /* Specifies the 32 bit value that is used in the comparison. */
+}; /* Type */
+
+struct ipa_ipfltr_eq_16_type_v01 {
+ uint8_t offset;
+ /* Specifies the offset into the packet */
+
+ uint16_t value;
+ /* Specifies the 16 bit value that should be used in the comparison. */
+}; /* Type */
+
+struct ipa_ipfltr_eq_32_type_v01 {
+ uint8_t offset;
+ /* Specifies the offset into the packet */
+
+ uint32_t value;
+ /* Specifies the 32 bit value that should be used in the comparison. */
+}; /* Type */
+
+struct ipa_ipfltr_mask_eq_128_type_v01 {
+ uint8_t offset;
+ /* Specifies the offset into the packet */
+
+ uint8_t mask[16];
+ /* Specifies the mask that has to be used in the comparison.
+ * The field is ANDed with the mask and compared against the value.
+ */
+
+ uint8_t value[16];
+ /* Specifies the 128 bit value that should be used in the comparison. */
+}; /* Type */
+
+
+struct ipa_filter_rule_type_v01 {
+ uint16_t rule_eq_bitmap;
+ /* 16-bit Bitmask to indicate how many eqs are valid in this rule */
+
+ uint8_t tos_eq_present;
+ /* Specifies if a type of service check rule is present */
+
+ uint8_t tos_eq;
+ /* The value to check against the type of service (ipv4) field */
+
+ uint8_t protocol_eq_present;
+ /* Specifies if a protocol check rule is present */
+
+ uint8_t protocol_eq;
+ /* The value to check against the protocol field */
+
+ uint8_t num_ihl_offset_range_16;
+ /* The number of 16 bit range check rules at the location
+ * determined by the IP header length plus a given offset
+ * in this rule. See the definition of ipa_ipfltr_range_eq_16_type_v01
+ * for better understanding. The value of this field cannot exceed
+ * QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01, which is set to 2
+ */
+
+ struct ipa_ipfltr_range_eq_16_type_v01
+ ihl_offset_range_16[QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01];
+ /* Array of the registered IP header length offset 16 bit range
+ * check rules.
+ */
+
+ uint8_t num_offset_meq_32;
+ /* The number of 32 bit masked comparison rules present
+ * in this rule
+ */
+
+ struct ipa_ipfltr_mask_eq_32_type_v01
+ offset_meq_32[QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01];
+ /* An array of all the possible 32bit masked comparison rules
+ * in this rule
+ */
+
+ uint8_t tc_eq_present;
+ /* Specifies if the traffic class rule is present in this rule */
+
+ uint8_t tc_eq;
+ /* The value against which the IPV4 traffic class field has to
+ * be checked
+ */
+
+ uint8_t flow_eq_present;
+ /* Specifies if the "flow equals" rule is present in this rule */
+
+ uint32_t flow_eq;
+ /* The value against which the IPV6 flow field has to be checked */
+
+ uint8_t ihl_offset_eq_16_present;
+ /* Specifies if there is a 16 bit comparison required at the
+ * location in the packet determined by "Intenet Header length
+ * + specified offset"
+ */
+
+ struct ipa_ipfltr_eq_16_type_v01 ihl_offset_eq_16;
+ /* The 16 bit comparison equation */
+
+ uint8_t ihl_offset_eq_32_present;
+ /* Specifies if there is a 32 bit comparison required at the
+ * location in the packet determined by "Intenet Header length
+ * + specified offset"
+ */
+
+ struct ipa_ipfltr_eq_32_type_v01 ihl_offset_eq_32;
+ /* The 32 bit comparison equation */
+
+ uint8_t num_ihl_offset_meq_32;
+ /* The number of 32 bit masked comparison equations in this
+ * rule. The location of the packet to be compared is
+ * determined by the IP Header length + the given offset
+ */
+
+ struct ipa_ipfltr_mask_eq_32_type_v01
+ ihl_offset_meq_32[QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01];
+ /* Array of 32 bit masked comparison equations.
+ */
+
+ uint8_t num_offset_meq_128;
+ /* The number of 128 bit comparison equations in this rule */
+
+ struct ipa_ipfltr_mask_eq_128_type_v01
+ offset_meq_128[QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01];
+ /* Array of 128 bit comparison equations. The location in the
+ * packet is determined by the specified offset
+ */
+
+ uint8_t metadata_meq32_present;
+ /* Boolean indicating if the 32 bit masked comparison equation
+ * is present or not. Comparison is done against the metadata
+ * in IPA. Metadata can either be extracted from the packet
+ * header or from the "metadata" register.
+ */
+
+ struct ipa_ipfltr_mask_eq_32_type_v01
+ metadata_meq32;
+ /* The metadata 32 bit masked comparison equation */
+
+ uint8_t ipv4_frag_eq_present;
+ /* Specifies if the IPv4 Fragment equation is present in this rule */
+}; /* Type */
+
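+/*
+ * Illustrative sketch (placeholder values): a minimal rule matching the
+ * IP protocol field, e.g. UDP (protocol 17), using only the protocol
+ * equation:
+ *
+ *	struct ipa_filter_rule_type_v01 rule;
+ *
+ *	memset(&rule, 0, sizeof(rule));
+ *	rule.protocol_eq_present = 1;
+ *	rule.protocol_eq = 17;
+ *
+ * rule_eq_bitmap would additionally have to flag the protocol equation
+ * as valid; the exact bit layout is hardware-defined and not spelled out
+ * in this header.
+ */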
+
+enum ipa_ip_type_enum_v01 {
+ IPA_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ /* To force a 32 bit signed enum. Do not change or use*/
+ QMI_IPA_IP_TYPE_INVALID_V01 = 0,
+ /* Invalid IP type identifier */
+ QMI_IPA_IP_TYPE_V4_V01 = 1,
+ /* IP V4 type */
+ QMI_IPA_IP_TYPE_V6_V01 = 2,
+ /* IP V6 type */
+ QMI_IPA_IP_TYPE_V4V6_V01 = 3,
+ /* Applies to both IP types */
+ IPA_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+ /* To force a 32 bit signed enum. Do not change or use*/
+};
+
+
+enum ipa_filter_action_enum_v01 {
+ IPA_FILTER_ACTION_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ /* To force a 32 bit signed enum. Do not change or use */
+ QMI_IPA_FILTER_ACTION_INVALID_V01 = 0,
+ /* Invalid action on filter hit */
+ QMI_IPA_FILTER_ACTION_SRC_NAT_V01 = 1,
+ /* Pass packet to NAT block for Source NAT */
+ QMI_IPA_FILTER_ACTION_DST_NAT_V01 = 2,
+ /* Pass packet to NAT block for Destination NAT */
+ QMI_IPA_FILTER_ACTION_ROUTING_V01 = 3,
+ /* Pass packet to Routing block */
+ QMI_IPA_FILTER_ACTION_EXCEPTION_V01 = 4,
+ /* Treat packet as exception and send to exception pipe */
+ IPA_FILTER_ACTION_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+ /* To force a 32 bit signed enum. Do not change or use*/
+};
+
+struct ipa_filter_spec_type_v01 {
+ uint32_t filter_spec_identifier;
+ /* This field is used to identify a filter spec in the list
+ * of filter specs being sent from the client. This field
+ * is applicable only in the filter install request and response.
+ */
+
+ enum ipa_ip_type_enum_v01 ip_type;
+ /* This field identifies the IP type for which this rule is
+ * applicable. The driver needs to identify the filter table
+ * (V6 or V4) and this field is essential for that
+ */
+
+ struct ipa_filter_rule_type_v01 filter_rule;
+ /* This field specifies the rules in the filter spec. These rules
+ * are the ones that are matched against fields in the packet.
+ */
+
+ enum ipa_filter_action_enum_v01 filter_action;
+ /* This field specifies the action to be taken when a filter match
+ * occurs. The remote side should install this information into the
+ * hardware along with the filter equations.
+ */
+
+ uint8_t is_routing_table_index_valid;
+ /* Specifies whether the routing table index is present or not.
+ * If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+ * parameter need not be provided.
+ */
+
+ uint32_t route_table_index;
+ /* This is the index in the routing table that should be used
+ * to route the packets if the filter rule is hit
+ */
+
+ uint8_t is_mux_id_valid;
+ /* Specifies whether the mux_id is valid */
+
+ uint32_t mux_id;
+ /* This field identifies the QMAP MUX ID. As a part of QMAP
+ * protocol, several data calls may be multiplexed over the
+ * same physical transport channel. This identifier is used to
+ * identify one such data call. The maximum value for this
+ * identifier is 255.
+ */
+}; /* Type */
+
+struct ipa_filter_spec_ex_type_v01 {
+ enum ipa_ip_type_enum_v01 ip_type;
+ /* This field identifies the IP type for which this rule is
+ * applicable. The driver needs to identify the filter table
+ * (V6 or V4) and this field is essential for that
+ */
+
+ struct ipa_filter_rule_type_v01 filter_rule;
+ /* This field specifies the rules in the filter spec. These rules
+ * are the ones that are matched against fields in the packet.
+ */
+
+ enum ipa_filter_action_enum_v01 filter_action;
+ /* This field specifies the action to be taken when a filter match
+ * occurs. The remote side should install this information into the
+ * hardware along with the filter equations.
+ */
+
+ uint8_t is_routing_table_index_valid;
+ /* Specifies whether the routing table index is present or not.
+ * If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+ * parameter need not be provided.
+ */
+
+ uint32_t route_table_index;
+ /* This is the index in the routing table that should be used
+ * to route the packets if the filter rule is hit
+ */
+
+ uint8_t is_mux_id_valid;
+ /* Specifies whether the mux_id is valid */
+
+ uint32_t mux_id;
+ /* This field identifies the QMAP MUX ID. As a part of QMAP
+ * protocol, several data calls may be multiplexed over the
+ * same physical transport channel. This identifier is used to
+ * identify one such data call. The maximum value for this
+ * identifier is 255.
+ */
+
+ uint32_t rule_id;
+ /* Rule Id of the given filter. The Rule Id is populated in the rule
+ * header when installing the rule in IPA.
+ */
+
+ uint8_t is_rule_hashable;
+ /** Specifies whether the given rule is hashable.
+ */
+}; /* Type */
+
+
+/* Request Message; This is the message that is exchanged between the
+ * control point and the service in order to request the installation
+ * of filtering rules in the hardware block by the remote side.
+ */
+struct ipa_install_fltr_rule_req_msg_v01 {
+ /* Optional
+ * IP type that this rule applies to
+ * Filter specification to be installed in the hardware
+ */
+ uint8_t filter_spec_list_valid;
+ /* Must be set to true if filter_spec_list is being passed */
+ uint32_t filter_spec_list_len;
+ /* Must be set to # of elements in filter_spec_list */
+ struct ipa_filter_spec_type_v01
+ filter_spec_list[QMI_IPA_MAX_FILTERS_V01];
+ /* This structure defines the list of filters that have
+ * to be installed in the hardware. The driver installing
+ * these rules shall do so in the same order as specified
+ * in this list.
+ */
+
+ /* Optional */
+	/* Pipe index on which to install the rule */
+ uint8_t source_pipe_index_valid;
+ /* Must be set to true if source_pipe_index is being passed */
+ uint32_t source_pipe_index;
+	/* This is the source pipe on which the filter rule is to be
+	 * installed. The requester may not always know the pipe
+	 * indices. If not specified, the receiver shall install
+	 * this rule on all the pipes that it controls through
+	 * which data may be fed into the IPA.
+	 */
+
+ /* Optional */
+ /* Total number of IPv4 filters in the filter spec list */
+ uint8_t num_ipv4_filters_valid;
+ /* Must be set to true if num_ipv4_filters is being passed */
+ uint32_t num_ipv4_filters;
+ /* Number of IPv4 rules included in filter spec list */
+
+ /* Optional */
+ /* Total number of IPv6 filters in the filter spec list */
+ uint8_t num_ipv6_filters_valid;
+ /* Must be set to true if num_ipv6_filters is being passed */
+ uint32_t num_ipv6_filters;
+ /* Number of IPv6 rules included in filter spec list */
+
+ /* Optional */
+ /* List of XLAT filter indices in the filter spec list */
+ uint8_t xlat_filter_indices_list_valid;
+ /* Must be set to true if xlat_filter_indices_list
+ * is being passed
+ */
+ uint32_t xlat_filter_indices_list_len;
+ /* Must be set to # of elements in xlat_filter_indices_list */
+ uint32_t xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_V01];
+ /* List of XLAT filter indices. Filter rules at specified indices
+ * will need to be modified by the receiver if the PDN is XLAT
+ * before installing them on the associated IPA consumer pipe.
+ */
+
+ /* Optional */
+ /* Extended Filter Specification */
+ uint8_t filter_spec_ex_list_valid;
+ /* Must be set to true if filter_spec_ex_list is being passed */
+ uint32_t filter_spec_ex_list_len;
+ /* Must be set to # of elements in filter_spec_ex_list */
+ struct ipa_filter_spec_ex_type_v01
+ filter_spec_ex_list[QMI_IPA_MAX_FILTERS_V01];
+ /*
+ * List of filter specifications of filters that must be installed in
+ * the IPAv3.x hardware.
+ * The driver installing these rules must do so in the same
+ * order as specified in this list.
+ */
+}; /* Message */
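+
+/*
+ * Illustrative sketch (not part of the generated QMI interface): a control
+ * point might fill the request as below before encoding it for QMI. Only
+ * the optional-TLV valid/len pattern is shown; the filter spec contents
+ * and the QMI send path are omitted.
+ *
+ *	struct ipa_install_fltr_rule_req_msg_v01 req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.filter_spec_list_valid = 1;
+ *	req.filter_spec_list_len = 1;
+ *	... fill req.filter_spec_list[0] (ip_type, filter_rule, action) ...
+ *	req.num_ipv4_filters_valid = 1;
+ *	req.num_ipv4_filters = 1;
+ */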
+
+struct ipa_filter_rule_identifier_to_handle_map_v01 {
+ uint32_t filter_spec_identifier;
+ /* This field is used to identify a filter spec in the list of
+ * filter specs being sent from the client. This field is
+ * applicable only in the filter install request and response.
+ */
+ uint32_t filter_handle;
+ /* This field is used to identify a rule in any subsequent message.
+ * This is a value that is provided by the server to the control
+ * point
+ */
+}; /* Type */
+
+/* Response Message; This is the message that is exchanged between the
+ * control point and the service in order to request the
+ * installation of filtering rules in the hardware block by
+ * the remote side.
+ */
+struct ipa_install_fltr_rule_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. Contains the following data members:
+ * - qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+ * - qmi_error_type -- Error code. Possible error code values are
+ * described in the error codes section of each message definition.
+ */
+
+ /* Optional */
+ /* Filter Handle List */
+ uint8_t filter_handle_list_valid;
+ /* Must be set to true if filter_handle_list is being passed */
+ uint32_t filter_handle_list_len;
+ /* Must be set to # of elements in filter_handle_list */
+ struct ipa_filter_rule_identifier_to_handle_map_v01
+ filter_handle_list[QMI_IPA_MAX_FILTERS_V01];
+ /*
+ * List of handles returned to the control point. Each handle is
+ * mapped to the rule identifier that was specified in the
+ * request message. Any further reference to the rule is done
+ * using the filter handle.
+ */
+
+ /* Optional */
+ /* Rule id List */
+ uint8_t rule_id_valid;
+ /* Must be set to true if rule_id is being passed */
+ uint32_t rule_id_len;
+ /* Must be set to # of elements in rule_id */
+ uint32_t rule_id[QMI_IPA_MAX_FILTERS_V01];
+ /*
+ * List of rule ids returned to the control point.
+ * Any further reference to the rule is done using the
+ * filter rule id specified in this list.
+ */
+}; /* Message */
+
+struct ipa_filter_handle_to_index_map_v01 {
+ uint32_t filter_handle;
+ /* This is a handle that was given to the remote client that
+ * requested the rule addition.
+ */
+ uint32_t filter_index;
+ /* This index denotes the location in a filter table, where the
+ * filter rule has been installed. The maximum value of this
+ * field is 64.
+ */
+}; /* Type */
+
+/* Request Message; This is the message that is exchanged between the
+ * control point and the service in order to notify the remote driver
+ * of the installation of the filter rule supplied earlier by the
+ * remote driver.
+ */
+struct ipa_fltr_installed_notif_req_msg_v01 {
+ /* Mandatory */
+ /* Pipe index */
+ uint32_t source_pipe_index;
+ /* This is the source pipe on which the filter rule has been
+ * installed or was attempted to be installed
+ */
+
+ /* Mandatory */
+ /* Installation Status */
+ enum ipa_qmi_result_type_v01 install_status;
+ /* This is the status of installation. If this indicates
+ * SUCCESS, other optional fields carry additional
+ * information
+ */
+
+ /* Mandatory */
+ /* List of Filter Indices */
+ uint32_t filter_index_list_len;
+ /* Must be set to # of elements in filter_index_list */
+ struct ipa_filter_handle_to_index_map_v01
+ filter_index_list[QMI_IPA_MAX_FILTERS_V01];
+ /*
+ * Provides the list of filter indices and the corresponding
+	 * filter handle. If the install_status indicates a
+	 * failure, the filter indices must be set to a reserved
+ * index (255).
+ */
+
+ /* Optional */
+ /* Embedded pipe index */
+ uint8_t embedded_pipe_index_valid;
+ /* Must be set to true if embedded_pipe_index is being passed */
+ uint32_t embedded_pipe_index;
+ /* This index denotes the embedded pipe number on which a call to
+ * the same PDN has been made. If this field is set, it denotes
+ * that this is a use case where PDN sharing is happening. The
+ * embedded pipe is used to send data from the embedded client
+ * in the device
+ */
+
+ /* Optional */
+ /* Retain Header Configuration */
+ uint8_t retain_header_valid;
+ /* Must be set to true if retain_header is being passed */
+ uint8_t retain_header;
+ /* This field indicates if the driver installing the rule has
+ * turned on the "retain header" bit. If this is true, the
+ * header that is removed by IPA is reinserted after the
+ * packet processing is completed.
+ */
+
+ /* Optional */
+ /* Embedded call Mux Id */
+ uint8_t embedded_call_mux_id_valid;
+	/* Must be set to true if embedded_call_mux_id is being passed */
+ uint32_t embedded_call_mux_id;
+ /* This identifies one of the many calls that have been originated
+ * on the embedded pipe. This is how we identify the PDN gateway
+ * to which traffic from the source pipe has to flow.
+ */
+
+ /* Optional */
+ /* Total number of IPv4 filters in the filter index list */
+ uint8_t num_ipv4_filters_valid;
+ /* Must be set to true if num_ipv4_filters is being passed */
+ uint32_t num_ipv4_filters;
+ /* Number of IPv4 rules included in filter index list */
+
+ /* Optional */
+ /* Total number of IPv6 filters in the filter index list */
+ uint8_t num_ipv6_filters_valid;
+ /* Must be set to true if num_ipv6_filters is being passed */
+ uint32_t num_ipv6_filters;
+ /* Number of IPv6 rules included in filter index list */
+
+ /* Optional */
+ /* Start index on IPv4 filters installed on source pipe */
+ uint8_t start_ipv4_filter_idx_valid;
+ /* Must be set to true if start_ipv4_filter_idx is being passed */
+ uint32_t start_ipv4_filter_idx;
+ /* Start index of IPv4 rules in filter index list */
+
+ /* Optional */
+ /* Start index on IPv6 filters installed on source pipe */
+ uint8_t start_ipv6_filter_idx_valid;
+ /* Must be set to true if start_ipv6_filter_idx is being passed */
+ uint32_t start_ipv6_filter_idx;
+ /* Start index of IPv6 rules in filter index list */
+
+ /* Optional */
+ /* List of Rule Ids */
+ uint8_t rule_id_valid;
+ /* Must be set to true if rule_id is being passed */
+ uint32_t rule_id_len;
+ /* Must be set to # of elements in rule_id */
+ uint32_t rule_id[QMI_IPA_MAX_FILTERS_V01];
+ /*
+ * Provides the list of Rule Ids of rules added in IPA on the given
+ * source pipe index. If the install_status TLV indicates a
+ * failure, the Rule Ids in this list must be set to a reserved
+ * index (255).
+ */
+}; /* Message */
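+
+/*
+ * Illustrative sketch (not part of the generated QMI interface): on a
+ * failed installation the reserved filter index (255) is reported for
+ * every entry. QMI_RESULT_FAILURE_V01 is assumed to be the failure value
+ * of enum ipa_qmi_result_type_v01; pipe_idx, num_rules and handles[] are
+ * caller-supplied.
+ *
+ *	struct ipa_fltr_installed_notif_req_msg_v01 ntf;
+ *	uint32_t i;
+ *
+ *	memset(&ntf, 0, sizeof(ntf));
+ *	ntf.source_pipe_index = pipe_idx;
+ *	ntf.install_status = QMI_RESULT_FAILURE_V01;
+ *	ntf.filter_index_list_len = num_rules;
+ *	for (i = 0; i < num_rules; i++) {
+ *		ntf.filter_index_list[i].filter_handle = handles[i];
+ *		ntf.filter_index_list[i].filter_index = 255;
+ *	}
+ */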
+
+/* Response Message; This is the message that is exchanged between the
+ * control point and the service in order to notify the remote driver
+ * of the installation of the filter rule supplied earlier by the
+ * remote driver.
+ */
+struct ipa_fltr_installed_notif_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type */
+}; /* Message */
+
+/* Request Message; Notifies the remote driver of the need to clear the data
+ * path to prevent the IPA from being blocked at the head of the processing
+ * pipeline
+ */
+struct ipa_enable_force_clear_datapath_req_msg_v01 {
+ /* Mandatory */
+ /* Pipe Mask */
+ uint32_t source_pipe_bitmask;
+ /* Set of consumer (source) pipes that must be clear of
+ * active data transfers.
+ */
+
+ /* Mandatory */
+ /* Request ID */
+ uint32_t request_id;
+	/* Identifies the ID of the request that is sent to the server.
+	 * The same request ID is used in the message to remove the force_clear
+ * request. The server is expected to keep track of the request ID and
+ * the source_pipe_bitmask so that it can revert as needed
+ */
+
+ /* Optional */
+ /* Source Throttle State */
+ uint8_t throttle_source_valid;
+ /* Must be set to true if throttle_source is being passed */
+ uint8_t throttle_source;
+ /* Specifies whether the server is to throttle the data from
+	 * these consumer (source) pipes after clearing the existing
+	 * data present in the IPA that was pulled from these pipes.
+ * The server is expected to put all the source pipes in the
+ * source_pipe_bitmask in the same state
+ */
+}; /* Message */
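+
+/*
+ * Illustrative sketch (not part of the generated QMI interface): the pipe
+ * mask is a plain bitmap of source pipe indices, so a request covering
+ * pipes 3 and 5 could be built as below; my_request_id is a caller-chosen
+ * value that must be reused in the matching disable request.
+ *
+ *	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.source_pipe_bitmask = (1u << 3) | (1u << 5);
+ *	req.request_id = my_request_id;
+ *	req.throttle_source_valid = 1;
+ *	req.throttle_source = 1;
+ */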
+
+/* Response Message; Notifies the remote driver of the need to clear the
+ * data path to prevent the IPA from being blocked at the head of the
+ * processing pipeline
+ */
+struct ipa_enable_force_clear_datapath_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type */
+}; /* Message */
+
+/* Request Message; Notifies the remote driver that the forceful clearing
+ * of the data path can be lifted
+ */
+struct ipa_disable_force_clear_datapath_req_msg_v01 {
+ /* Mandatory */
+ /* Request ID */
+ uint32_t request_id;
+ /* Identifies the request that was sent to the server to
+ * forcibly clear the data path. This request simply undoes
+ * the operation done in that request
+ */
+}; /* Message */
+
+/* Response Message; Notifies the remote driver that the forceful clearing
+ * of the data path can be lifted
+ */
+struct ipa_disable_force_clear_datapath_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type */
+}; /* Message */
+
+enum ipa_peripheral_speed_enum_v01 {
+ IPA_PERIPHERAL_SPEED_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ /* To force a 32 bit signed enum. Do not change or use */
+ QMI_IPA_PER_USB_FS_V01 = 1,
+ /* Full-speed USB connection */
+ QMI_IPA_PER_USB_HS_V01 = 2,
+ /* High-speed USB connection */
+ QMI_IPA_PER_USB_SS_V01 = 3,
+ /* Super-speed USB connection */
+ QMI_IPA_PER_WLAN_V01 = 4,
+ /* WLAN connection */
+ IPA_PERIPHERAL_SPEED_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+ /* To force a 32 bit signed enum. Do not change or use*/
+};
+
+enum ipa_pipe_mode_enum_v01 {
+ IPA_PIPE_MODE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ /* To force a 32 bit signed enum. Do not change or use */
+ QMI_IPA_PIPE_MODE_HW_V01 = 1,
+ /* Pipe is connected with a hardware block */
+ QMI_IPA_PIPE_MODE_SW_V01 = 2,
+ /* Pipe is controlled by the software */
+ IPA_PIPE_MODE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+ /* To force a 32 bit signed enum. Do not change or use */
+};
+
+enum ipa_peripheral_type_enum_v01 {
+ IPA_PERIPHERAL_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ /* To force a 32 bit signed enum. Do not change or use */
+ QMI_IPA_PERIPHERAL_USB_V01 = 1,
+ /* Specifies a USB peripheral */
+ QMI_IPA_PERIPHERAL_HSIC_V01 = 2,
+ /* Specifies an HSIC peripheral */
+ QMI_IPA_PERIPHERAL_PCIE_V01 = 3,
+ /* Specifies a PCIe peripheral */
+ IPA_PERIPHERAL_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+ /* To force a 32 bit signed enum. Do not change or use */
+};
+
+struct ipa_config_req_msg_v01 {
+ /* Optional */
+ /* Peripheral Type */
+ uint8_t peripheral_type_valid;
+ /* Must be set to true if peripheral_type is being passed */
+ enum ipa_peripheral_type_enum_v01 peripheral_type;
+	/* Informs the remote driver about the peripheral for
+ * which this configuration information is relevant. Values:
+ * - QMI_IPA_PERIPHERAL_USB (1) -- Specifies a USB peripheral
+	 * - QMI_IPA_PERIPHERAL_HSIC (2) -- Specifies an HSIC peripheral
+	 * - QMI_IPA_PERIPHERAL_PCIE (3) -- Specifies a PCIe peripheral
+ */
+
+ /* Optional */
+ /* HW Deaggregation Support */
+ uint8_t hw_deaggr_supported_valid;
+ /* Must be set to true if hw_deaggr_supported is being passed */
+ uint8_t hw_deaggr_supported;
+ /* Informs the remote driver whether the local IPA driver
+ * allows de-aggregation to be performed in the hardware
+ */
+
+ /* Optional */
+ /* Maximum Aggregation Frame Size */
+ uint8_t max_aggr_frame_size_valid;
+ /* Must be set to true if max_aggr_frame_size is being passed */
+ uint32_t max_aggr_frame_size;
+ /* Specifies the maximum size of the aggregated frame that
+ * the remote driver can expect from this execution environment
+ * - Valid range: 128 bytes to 32768 bytes
+ */
+
+ /* Optional */
+ /* IPA Ingress Pipe Mode */
+ uint8_t ipa_ingress_pipe_mode_valid;
+ /* Must be set to true if ipa_ingress_pipe_mode is being passed */
+
+ enum ipa_pipe_mode_enum_v01 ipa_ingress_pipe_mode;
+ /* Indicates to the remote driver if the ingress pipe into the
+ * IPA is in direct connection with another hardware block or
+ * if the producer of data to this ingress pipe is a software
+ * module. Values:
+	 * - QMI_IPA_PIPE_MODE_HW (1) -- Pipe is connected with a hardware block
+	 * - QMI_IPA_PIPE_MODE_SW (2) -- Pipe is controlled by the software
+ */
+
+ /* Optional */
+ /* Peripheral Speed Info */
+ uint8_t peripheral_speed_info_valid;
+ /* Must be set to true if peripheral_speed_info is being passed */
+
+ enum ipa_peripheral_speed_enum_v01 peripheral_speed_info;
+ /* Indicates the speed that the peripheral connected to the IPA supports
+ * Values:
+ * - QMI_IPA_PER_USB_FS (1) -- Full-speed USB connection
+ * - QMI_IPA_PER_USB_HS (2) -- High-speed USB connection
+ * - QMI_IPA_PER_USB_SS (3) -- Super-speed USB connection
+ * - QMI_IPA_PER_WLAN (4) -- WLAN connection
+ */
+
+ /* Optional */
+ /* Downlink Accumulation Time limit */
+ uint8_t dl_accumulation_time_limit_valid;
+ /* Must be set to true if dl_accumulation_time_limit is being passed */
+ uint32_t dl_accumulation_time_limit;
+ /* Informs the remote driver about the time for which data
+ * is accumulated in the downlink direction before it is pushed into the
+ * IPA (downlink is with respect to the WWAN air interface)
+ * - Units: milliseconds
+ * - Maximum value: 255
+ */
+
+ /* Optional */
+ /* Downlink Accumulation Packet limit */
+ uint8_t dl_accumulation_pkt_limit_valid;
+ /* Must be set to true if dl_accumulation_pkt_limit is being passed */
+ uint32_t dl_accumulation_pkt_limit;
+ /* Informs the remote driver about the number of packets
+	 * that are to be accumulated in the downlink direction before they
+	 * are pushed into the IPA - Maximum value: 1023
+ */
+
+ /* Optional */
+ /* Downlink Accumulation Byte Limit */
+ uint8_t dl_accumulation_byte_limit_valid;
+ /* Must be set to true if dl_accumulation_byte_limit is being passed */
+ uint32_t dl_accumulation_byte_limit;
+	/* Informs the remote driver about the number of bytes
+	 * that are to be accumulated in the downlink direction before they
+	 * are pushed into the IPA - Maximum value: TBD
+ */
+
+ /* Optional */
+ /* Uplink Accumulation Time Limit */
+ uint8_t ul_accumulation_time_limit_valid;
+ /* Must be set to true if ul_accumulation_time_limit is being passed */
+ uint32_t ul_accumulation_time_limit;
+	/* Informs the remote driver about the time for which data
+	 * is to be accumulated in the uplink direction before it is pushed into
+	 * the IPA (uplink is with respect to the WWAN air interface).
+ * - Units: milliseconds
+ * - Maximum value: 255
+ */
+
+ /* Optional */
+ /* HW Control Flags */
+ uint8_t hw_control_flags_valid;
+ /* Must be set to true if hw_control_flags is being passed */
+ uint32_t hw_control_flags;
+ /* Informs the remote driver about the hardware control flags:
+ * - Bit 0: IPA_HW_FLAG_HALT_SYSTEM_ON_NON_TERMINAL_FAILURE --
+ * Indicates to the hardware that it must not continue with
+ * any subsequent operation even if the failure is not terminal
+ * - Bit 1: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR --
+ * Indicates to the hardware that it is not required to report
+ * channel errors to the host.
+ * - Bit 2: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP --
+ * Indicates to the hardware that it is not required to generate
+ * wake-up events to the host.
+ * - Bit 4: IPA_HW_FLAG_WORK_OVER_DDR --
+ * Indicates to the hardware that it is accessing addresses in
+ * the DDR and not over PCIe
+ * - Bit 5: IPA_HW_FLAG_INTERRUPT_MODE_CTRL_FLAG --
+ * Indicates whether the device must
+ * raise an event to let the host know that it is going into an
+ * interrupt mode (no longer polling for data/buffer availability)
+ */
+
+ /* Optional */
+ /* Uplink MSI Event Threshold */
+ uint8_t ul_msi_event_threshold_valid;
+ /* Must be set to true if ul_msi_event_threshold is being passed */
+ uint32_t ul_msi_event_threshold;
+ /* Informs the remote driver about the threshold that will
+ * cause an interrupt (MSI) to be fired to the host. This ensures
+	 * that the remote driver does not accumulate an excessive number of
+ * events before firing an interrupt.
+ * This threshold is applicable for data moved in the UL direction.
+ * - Maximum value: 65535
+ */
+
+ /* Optional */
+ /* Downlink MSI Event Threshold */
+ uint8_t dl_msi_event_threshold_valid;
+ /* Must be set to true if dl_msi_event_threshold is being passed */
+ uint32_t dl_msi_event_threshold;
+ /* Informs the remote driver about the threshold that will
+ * cause an interrupt (MSI) to be fired to the host. This ensures
+	 * that the remote driver does not accumulate an excessive number of
+	 * events before firing an interrupt.
+ * This threshold is applicable for data that is moved in the
+ * DL direction - Maximum value: 65535
+ */
+
+ /* Optional */
+ /* Uplink Fifo Size */
+ uint8_t ul_fifo_size_valid;
+ /* Must be set to true if ul_fifo_size is being passed */
+ uint32_t ul_fifo_size;
+	/*
+	 * Informs the remote driver about the total Uplink xDCI
+	 * buffer size that holds the complete aggregated frame
+	 * or BAM data FIFO size of the peripheral channel/pipe (in bytes).
+	 * This TLV deprecates the max_aggr_frame_size TLV from version 1.9
+	 * onwards; max_aggr_frame_size is ignored in the presence of
+	 * this TLV.
+	 */
+
+ /* Optional */
+ /* Downlink Fifo Size */
+ uint8_t dl_fifo_size_valid;
+ /* Must be set to true if dl_fifo_size is being passed */
+ uint32_t dl_fifo_size;
+	/*
+	 * Informs the remote driver about the total Downlink xDCI buffering
+	 * capacity or BAM data FIFO size of the peripheral channel/pipe
+	 * (in bytes), where dl_fifo_size = n * dl_buf_size. This deprecates
+	 * the max_aggr_frame_size field; if this value is set,
+	 * max_aggr_frame_size is ignored.
+	 */
+
+ /* Optional */
+ /* Downlink Buffer Size */
+ uint8_t dl_buf_size_valid;
+ /* Must be set to true if dl_buf_size is being passed */
+ uint32_t dl_buf_size;
+	/* Informs the remote driver about the size of a single xDCI
+	 * buffer (in bytes). This is applicable only in GSI mode.
+	 */
+}; /* Message */
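+
+/*
+ * Illustrative sketch (not part of the generated QMI interface): a config
+ * request for a high-speed USB peripheral with hardware de-aggregation
+ * allowed could be built as:
+ *
+ *	struct ipa_config_req_msg_v01 cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.peripheral_type_valid = 1;
+ *	cfg.peripheral_type = QMI_IPA_PERIPHERAL_USB_V01;
+ *	cfg.peripheral_speed_info_valid = 1;
+ *	cfg.peripheral_speed_info = QMI_IPA_PER_USB_HS_V01;
+ *	cfg.hw_deaggr_supported_valid = 1;
+ *	cfg.hw_deaggr_supported = 1;
+ */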
+
+/* Response Message; Notifies the remote driver of the configuration
+ * information
+ */
+struct ipa_config_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type */
+}; /* Message */
+
+enum ipa_stats_type_enum_v01 {
+ IPA_STATS_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ /* To force a 32 bit signed enum. Do not change or use */
+ QMI_IPA_STATS_TYPE_INVALID_V01 = 0,
+ /* Invalid stats type identifier */
+ QMI_IPA_STATS_TYPE_PIPE_V01 = 1,
+ /* Pipe stats type */
+ QMI_IPA_STATS_TYPE_FILTER_RULES_V01 = 2,
+ /* Filter rule stats type */
+ IPA_STATS_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+ /* To force a 32 bit signed enum. Do not change or use */
+};
+
+struct ipa_pipe_stats_info_type_v01 {
+ uint32_t pipe_index;
+ /* Pipe index for statistics to be retrieved. */
+
+ uint64_t num_ipv4_packets;
+ /* Accumulated number of IPv4 packets over this pipe. */
+
+ uint64_t num_ipv4_bytes;
+ /* Accumulated number of IPv4 bytes over this pipe. */
+
+ uint64_t num_ipv6_packets;
+ /* Accumulated number of IPv6 packets over this pipe. */
+
+ uint64_t num_ipv6_bytes;
+ /* Accumulated number of IPv6 bytes over this pipe. */
+};
+
+struct ipa_stats_type_filter_rule_v01 {
+ uint32_t filter_rule_index;
+ /* Filter rule index for statistics to be retrieved. */
+
+ uint64_t num_packets;
+ /* Accumulated number of packets over this filter rule. */
+};
+
+/* Request Message; Retrieve the data statistics collected on the modem
+ * IPA driver.
+ */
+struct ipa_get_data_stats_req_msg_v01 {
+ /* Mandatory */
+ /* Stats Type */
+ enum ipa_stats_type_enum_v01 ipa_stats_type;
+ /* Indicates the type of statistics to be retrieved. */
+
+ /* Optional */
+ /* Reset Statistics */
+ uint8_t reset_stats_valid;
+ /* Must be set to true if reset_stats is being passed */
+ uint8_t reset_stats;
+ /* Option to reset the specific type of data statistics
+ * currently collected.
+ */
+}; /* Message */
+
+/* Response Message; Retrieve the data statistics collected
+ * on the modem IPA driver.
+ */
+struct ipa_get_data_stats_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type. */
+
+ /* Optional */
+ /* Stats Type */
+ uint8_t ipa_stats_type_valid;
+ /* Must be set to true if ipa_stats_type is passed */
+ enum ipa_stats_type_enum_v01 ipa_stats_type;
+ /* Indicates the type of statistics that are retrieved. */
+
+ /* Optional */
+ /* Uplink Source Pipe Statistics List */
+ uint8_t ul_src_pipe_stats_list_valid;
+ /* Must be set to true if ul_src_pipe_stats_list is being passed */
+ uint32_t ul_src_pipe_stats_list_len;
+ /* Must be set to # of elements in ul_src_pipe_stats_list */
+ struct ipa_pipe_stats_info_type_v01
+ ul_src_pipe_stats_list[QMI_IPA_MAX_PIPES_V01];
+ /* List of all Uplink pipe statistics that are retrieved. */
+
+ /* Optional */
+ /* Downlink Destination Pipe Statistics List */
+ uint8_t dl_dst_pipe_stats_list_valid;
+ /* Must be set to true if dl_dst_pipe_stats_list is being passed */
+ uint32_t dl_dst_pipe_stats_list_len;
+ /* Must be set to # of elements in dl_dst_pipe_stats_list */
+ struct ipa_pipe_stats_info_type_v01
+ dl_dst_pipe_stats_list[QMI_IPA_MAX_PIPES_V01];
+ /* List of all Downlink pipe statistics that are retrieved. */
+
+ /* Optional */
+ /* Downlink Filter Rule Stats List */
+ uint8_t dl_filter_rule_stats_list_valid;
+ /* Must be set to true if dl_filter_rule_stats_list is being passed */
+ uint32_t dl_filter_rule_stats_list_len;
+ /* Must be set to # of elements in dl_filter_rule_stats_list */
+ struct ipa_stats_type_filter_rule_v01
+ dl_filter_rule_stats_list[QMI_IPA_MAX_FILTERS_V01];
+ /* List of all Downlink filter rule statistics retrieved. */
+}; /* Message */
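+
+/*
+ * Illustrative sketch: a caller consuming a decoded
+ * struct ipa_get_data_stats_resp_msg_v01 (resp) must honor the _valid
+ * flag and _len counter of each optional list; total_ul_pkts is a
+ * hypothetical accumulator.
+ *
+ *	uint32_t i;
+ *
+ *	if (resp.ul_src_pipe_stats_list_valid) {
+ *		for (i = 0; i < resp.ul_src_pipe_stats_list_len; i++)
+ *			total_ul_pkts +=
+ *			    resp.ul_src_pipe_stats_list[i].num_ipv4_packets +
+ *			    resp.ul_src_pipe_stats_list[i].num_ipv6_packets;
+ *	}
+ */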
+
+struct ipa_apn_data_stats_info_type_v01 {
+ uint32_t mux_id;
+ /* Indicates the MUX ID associated with the APN for which the data
+	 * usage statistics are queried
+ */
+
+ uint64_t num_ul_packets;
+ /* Accumulated number of uplink packets corresponding to
+ * this Mux ID
+ */
+
+ uint64_t num_ul_bytes;
+ /* Accumulated number of uplink bytes corresponding to
+ * this Mux ID
+ */
+
+ uint64_t num_dl_packets;
+ /* Accumulated number of downlink packets corresponding
+ * to this Mux ID
+ */
+
+ uint64_t num_dl_bytes;
+ /* Accumulated number of downlink bytes corresponding to
+ * this Mux ID
+ */
+}; /* Type */
+
+/* Request Message; Retrieve the APN data statistics collected from modem */
+struct ipa_get_apn_data_stats_req_msg_v01 {
+ /* Optional */
+ /* Mux ID List */
+ uint8_t mux_id_list_valid;
+ /* Must be set to true if mux_id_list is being passed */
+ uint32_t mux_id_list_len;
+ /* Must be set to # of elements in mux_id_list */
+ uint32_t mux_id_list[QMI_IPA_MAX_APN_V01];
+ /* The list of MUX IDs associated with APNs for which the data usage
+	 * statistics are being retrieved
+ */
+}; /* Message */
+
+/* Response Message; Retrieve the APN data statistics collected from modem */
+struct ipa_get_apn_data_stats_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type.*/
+
+ /* Optional */
+ /* APN Data Statistics List */
+ uint8_t apn_data_stats_list_valid;
+ /* Must be set to true if apn_data_stats_list is being passed */
+ uint32_t apn_data_stats_list_len;
+ /* Must be set to # of elements in apn_data_stats_list */
+ struct ipa_apn_data_stats_info_type_v01
+ apn_data_stats_list[QMI_IPA_MAX_APN_V01];
+	/* List of APN data statistics retrieved, as requested per mux_id.
+	 * For now, monitoring of only one APN is supported on the modem
+	 * driver. This is a list to allow expansion to more APNs in the
+	 * future.
+	 */
+}; /* Message */
+
+struct ipa_data_usage_quota_info_type_v01 {
+ uint32_t mux_id;
+ /* Indicates the MUX ID associated with the APN for which the data usage
+ * quota needs to be set
+ */
+
+ uint64_t num_Mbytes;
+	/* Quota value, in megabytes, to be set on the APN associated
+	 * with this MUX ID.
+ */
+}; /* Type */
+
+/* Request Message; Master driver sets a data usage quota value on
+ * modem driver
+ */
+struct ipa_set_data_usage_quota_req_msg_v01 {
+ /* Optional */
+ /* APN Quota List */
+ uint8_t apn_quota_list_valid;
+ /* Must be set to true if apn_quota_list is being passed */
+ uint32_t apn_quota_list_len;
+ /* Must be set to # of elements in apn_quota_list */
+ struct ipa_data_usage_quota_info_type_v01
+ apn_quota_list[QMI_IPA_MAX_APN_V01];
+	/* The list of APNs on which a data usage quota is to be set on the
+	 * modem driver. For now, monitoring of only one APN is supported.
+	 * This is a list to allow expansion to more APNs in the future.
+	 */
+}; /* Message */
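+
+/*
+ * Illustrative sketch (not part of the generated QMI interface): setting a
+ * 100 MB quota on the APN mapped to MUX ID 1, the single entry supported
+ * for now:
+ *
+ *	struct ipa_set_data_usage_quota_req_msg_v01 req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.apn_quota_list_valid = 1;
+ *	req.apn_quota_list_len = 1;
+ *	req.apn_quota_list[0].mux_id = 1;
+ *	req.apn_quota_list[0].num_Mbytes = 100;
+ */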
+
+/* Response Message; Master driver sets a data usage quota on the modem
+ * driver.
+ */
+struct ipa_set_data_usage_quota_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type.*/
+}; /* Message */
+
+/* Indication Message; Modem driver sends this indication to master
+ * driver when the data usage quota is reached
+ */
+struct ipa_data_usage_quota_reached_ind_msg_v01 {
+ /* Mandatory */
+ /* APN Quota List */
+ struct ipa_data_usage_quota_info_type_v01 apn;
+	/* Indicates the APN for which the previously set quota has been
+	 * reached. For now, monitoring of only one APN is supported on the
+	 * modem driver.
+	 */
+}; /* Message */
+
+/* Request Message; Master driver requests the modem driver to terminate
+ * the current data usage quota monitoring session.
+ */
+struct ipa_stop_data_usage_quota_req_msg_v01 {
+ /* This element is a placeholder to prevent the declaration of
+ * an empty struct. DO NOT USE THIS FIELD UNDER ANY CIRCUMSTANCE
+ */
+ char __placeholder;
+}; /* Message */
+
+/* Response Message; Master driver requests the modem driver to terminate
+ * the current quota monitoring session.
+ */
+struct ipa_stop_data_usage_quota_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type */
+}; /* Message */
+
+/*Service Message Definition*/
+#define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
+#define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
+#define QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 0x0021
+#define QMI_IPA_INIT_MODEM_DRIVER_RESP_V01 0x0021
+#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01 0x0022
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_V01 0x0023
+#define QMI_IPA_INSTALL_FILTER_RULE_RESP_V01 0x0023
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01 0x0024
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01 0x0024
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0025
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0025
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0026
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0026
+#define QMI_IPA_CONFIG_REQ_V01 0x0027
+#define QMI_IPA_CONFIG_RESP_V01 0x0027
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0028
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0028
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0029
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0029
+#define QMI_IPA_GET_DATA_STATS_REQ_V01 0x0030
+#define QMI_IPA_GET_DATA_STATS_RESP_V01 0x0030
+#define QMI_IPA_GET_APN_DATA_STATS_REQ_V01 0x0031
+#define QMI_IPA_GET_APN_DATA_STATS_RESP_V01 0x0031
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01 0x0032
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 0x0032
+#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01 0x0033
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01 0x0034
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 0x0034
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01 0x0035
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035
+
+/* add for max length */
+#define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134
+#define QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01 25
+#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 8
+#define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369
+#define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 834
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15
+
+
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7
+
+
+#define QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01 102
+#define QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01 11
+#define QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01 2234
+#define QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01 36
+#define QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01 299
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 100
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 0
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01 7
+
+/* Service Object Accessor */
+
+#endif /* IPA_QMI_SERVICE_V01_H */
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
new file mode 100644
index 0000000..89ffeb6
--- /dev/null
+++ b/include/uapi/linux/msm_ipa.h
@@ -0,0 +1,1762 @@
+#ifndef _UAPI_MSM_IPA_H_
+#define _UAPI_MSM_IPA_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#endif
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/**
+ * unique magic number of the IPA device
+ */
+#define IPA_IOC_MAGIC 0xCF
+
+/**
+ * name of the default routing tables for v4 and v6
+ */
+#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
+
+/**
+ * the commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR 0
+#define IPA_IOCTL_DEL_HDR 1
+#define IPA_IOCTL_ADD_RT_RULE 2
+#define IPA_IOCTL_DEL_RT_RULE 3
+#define IPA_IOCTL_ADD_FLT_RULE 4
+#define IPA_IOCTL_DEL_FLT_RULE 5
+#define IPA_IOCTL_COMMIT_HDR 6
+#define IPA_IOCTL_RESET_HDR 7
+#define IPA_IOCTL_COMMIT_RT 8
+#define IPA_IOCTL_RESET_RT 9
+#define IPA_IOCTL_COMMIT_FLT 10
+#define IPA_IOCTL_RESET_FLT 11
+#define IPA_IOCTL_DUMP 12
+#define IPA_IOCTL_GET_RT_TBL 13
+#define IPA_IOCTL_PUT_RT_TBL 14
+#define IPA_IOCTL_COPY_HDR 15
+#define IPA_IOCTL_QUERY_INTF 16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
+#define IPA_IOCTL_GET_HDR 19
+#define IPA_IOCTL_PUT_HDR 20
+#define IPA_IOCTL_SET_FLT 21
+#define IPA_IOCTL_ALLOC_NAT_MEM 22
+#define IPA_IOCTL_V4_INIT_NAT 23
+#define IPA_IOCTL_NAT_DMA 24
+#define IPA_IOCTL_V4_DEL_NAT 26
+#define IPA_IOCTL_PULL_MSG 27
+#define IPA_IOCTL_GET_NAT_OFFSET 28
+#define IPA_IOCTL_RM_ADD_DEPENDENCY 29
+#define IPA_IOCTL_RM_DEL_DEPENDENCY 30
+#define IPA_IOCTL_GENERATE_FLT_EQ 31
+#define IPA_IOCTL_QUERY_INTF_EXT_PROPS 32
+#define IPA_IOCTL_QUERY_EP_MAPPING 33
+#define IPA_IOCTL_QUERY_RT_TBL_INDEX 34
+#define IPA_IOCTL_WRITE_QMAPID 35
+#define IPA_IOCTL_MDFY_FLT_RULE 36
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD 37
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL 38
+#define IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED 39
+#define IPA_IOCTL_ADD_HDR_PROC_CTX 40
+#define IPA_IOCTL_DEL_HDR_PROC_CTX 41
+#define IPA_IOCTL_MDFY_RT_RULE 42
+#define IPA_IOCTL_ADD_RT_RULE_AFTER 43
+#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
+#define IPA_IOCTL_GET_HW_VERSION 45
+#define IPA_IOCTL_MAX 46
+
+/**
+ * max size of the header to be inserted
+ */
+#define IPA_HDR_MAX_SIZE 64
+
+/**
+ * max size of the name of the resource (routing table, header)
+ */
+#define IPA_RESOURCE_NAME_MAX 32
+
+/**
+ * max number of interface properties
+ */
+#define IPA_NUM_PROPS_MAX 35
+
+/**
+ * size of the mac address
+ */
+#define IPA_MAC_ADDR_SIZE 6
+
+/**
+ * max number of mbim streams
+ */
+#define IPA_MBIM_MAX_STREAM_NUM 8
+
+/**
+ * length of the ipv6 gateway address, in 32-bit words
+ */
+#define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
+
+/**
+ * the attributes of the rule (routing or filtering)
+ */
+#define IPA_FLT_TOS (1ul << 0)
+#define IPA_FLT_PROTOCOL (1ul << 1)
+#define IPA_FLT_SRC_ADDR (1ul << 2)
+#define IPA_FLT_DST_ADDR (1ul << 3)
+#define IPA_FLT_SRC_PORT_RANGE (1ul << 4)
+#define IPA_FLT_DST_PORT_RANGE (1ul << 5)
+#define IPA_FLT_TYPE (1ul << 6)
+#define IPA_FLT_CODE (1ul << 7)
+#define IPA_FLT_SPI (1ul << 8)
+#define IPA_FLT_SRC_PORT (1ul << 9)
+#define IPA_FLT_DST_PORT (1ul << 10)
+#define IPA_FLT_TC (1ul << 11)
+#define IPA_FLT_FLOW_LABEL (1ul << 12)
+#define IPA_FLT_NEXT_HDR (1ul << 13)
+#define IPA_FLT_META_DATA (1ul << 14)
+#define IPA_FLT_FRAGMENT (1ul << 15)
+#define IPA_FLT_TOS_MASKED (1ul << 16)
+#define IPA_FLT_MAC_SRC_ADDR_ETHER_II (1ul << 17)
+#define IPA_FLT_MAC_DST_ADDR_ETHER_II (1ul << 18)
+#define IPA_FLT_MAC_SRC_ADDR_802_3 (1ul << 19)
+#define IPA_FLT_MAC_DST_ADDR_802_3 (1ul << 20)
+#define IPA_FLT_MAC_ETHER_TYPE (1ul << 21)
+
+/**
+ * enum ipa_client_type - names for the various IPA "clients"
+ * these are from the perspective of the clients, e.g.
+ * HSIC1_PROD means the HSIC client is the producer and the IPA is
+ * the consumer
+ */
+enum ipa_client_type {
+ IPA_CLIENT_PROD,
+ IPA_CLIENT_HSIC1_PROD = IPA_CLIENT_PROD,
+ IPA_CLIENT_WLAN1_PROD,
+ IPA_CLIENT_HSIC2_PROD,
+ IPA_CLIENT_USB2_PROD,
+ IPA_CLIENT_HSIC3_PROD,
+ IPA_CLIENT_USB3_PROD,
+ IPA_CLIENT_HSIC4_PROD,
+ IPA_CLIENT_USB4_PROD,
+ IPA_CLIENT_HSIC5_PROD,
+ IPA_CLIENT_USB_PROD,
+ IPA_CLIENT_A5_WLAN_AMPDU_PROD,
+ IPA_CLIENT_A2_EMBEDDED_PROD,
+ IPA_CLIENT_A2_TETHERED_PROD,
+ IPA_CLIENT_APPS_LAN_WAN_PROD,
+ IPA_CLIENT_APPS_CMD_PROD,
+ IPA_CLIENT_ODU_PROD,
+ IPA_CLIENT_MHI_PROD,
+ IPA_CLIENT_Q6_LAN_PROD,
+ IPA_CLIENT_Q6_WAN_PROD,
+ IPA_CLIENT_Q6_CMD_PROD,
+ IPA_CLIENT_MEMCPY_DMA_SYNC_PROD,
+ IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD,
+ IPA_CLIENT_Q6_DECOMP_PROD,
+ IPA_CLIENT_Q6_DECOMP2_PROD,
+ IPA_CLIENT_UC_USB_PROD,
+
+ /* Below PROD client type is only for test purpose */
+ IPA_CLIENT_TEST_PROD,
+ IPA_CLIENT_TEST1_PROD,
+ IPA_CLIENT_TEST2_PROD,
+ IPA_CLIENT_TEST3_PROD,
+ IPA_CLIENT_TEST4_PROD,
+
+ IPA_CLIENT_CONS,
+ IPA_CLIENT_HSIC1_CONS = IPA_CLIENT_CONS,
+ IPA_CLIENT_WLAN1_CONS,
+ IPA_CLIENT_HSIC2_CONS,
+ IPA_CLIENT_USB2_CONS,
+ IPA_CLIENT_WLAN2_CONS,
+ IPA_CLIENT_HSIC3_CONS,
+ IPA_CLIENT_USB3_CONS,
+ IPA_CLIENT_WLAN3_CONS,
+ IPA_CLIENT_HSIC4_CONS,
+ IPA_CLIENT_USB4_CONS,
+ IPA_CLIENT_WLAN4_CONS,
+ IPA_CLIENT_HSIC5_CONS,
+ IPA_CLIENT_USB_CONS,
+ IPA_CLIENT_USB_DPL_CONS,
+ IPA_CLIENT_A2_EMBEDDED_CONS,
+ IPA_CLIENT_A2_TETHERED_CONS,
+ IPA_CLIENT_A5_LAN_WAN_CONS,
+ IPA_CLIENT_APPS_LAN_CONS,
+ IPA_CLIENT_APPS_WAN_CONS,
+ IPA_CLIENT_ODU_EMB_CONS,
+ IPA_CLIENT_ODU_TETH_CONS,
+ IPA_CLIENT_MHI_CONS,
+ IPA_CLIENT_Q6_LAN_CONS,
+ IPA_CLIENT_Q6_WAN_CONS,
+ IPA_CLIENT_Q6_DUN_CONS,
+ IPA_CLIENT_MEMCPY_DMA_SYNC_CONS,
+ IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS,
+ IPA_CLIENT_Q6_DECOMP_CONS,
+ IPA_CLIENT_Q6_DECOMP2_CONS,
+ IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS,
+ /* Below CONS client type is only for test purpose */
+ IPA_CLIENT_TEST_CONS,
+ IPA_CLIENT_TEST1_CONS,
+ IPA_CLIENT_TEST2_CONS,
+ IPA_CLIENT_TEST3_CONS,
+ IPA_CLIENT_TEST4_CONS,
+
+ IPA_CLIENT_MAX,
+};
+
+#define IPA_CLIENT_IS_APPS_CONS(client) \
+ ((client) == IPA_CLIENT_APPS_LAN_CONS || \
+ (client) == IPA_CLIENT_APPS_WAN_CONS)
+
+#define IPA_CLIENT_IS_USB_CONS(client) \
+ ((client) == IPA_CLIENT_USB_CONS || \
+ (client) == IPA_CLIENT_USB2_CONS || \
+ (client) == IPA_CLIENT_USB3_CONS || \
+ (client) == IPA_CLIENT_USB_DPL_CONS || \
+ (client) == IPA_CLIENT_USB4_CONS)
+
+#define IPA_CLIENT_IS_WLAN_CONS(client) \
+ ((client) == IPA_CLIENT_WLAN1_CONS || \
+ (client) == IPA_CLIENT_WLAN2_CONS || \
+ (client) == IPA_CLIENT_WLAN3_CONS || \
+ (client) == IPA_CLIENT_WLAN4_CONS)
+
+#define IPA_CLIENT_IS_ODU_CONS(client) \
+ ((client) == IPA_CLIENT_ODU_EMB_CONS || \
+ (client) == IPA_CLIENT_ODU_TETH_CONS)
+
+#define IPA_CLIENT_IS_Q6_CONS(client) \
+ ((client) == IPA_CLIENT_Q6_LAN_CONS || \
+ (client) == IPA_CLIENT_Q6_WAN_CONS || \
+ (client) == IPA_CLIENT_Q6_DUN_CONS || \
+ (client) == IPA_CLIENT_Q6_DECOMP_CONS || \
+ (client) == IPA_CLIENT_Q6_DECOMP2_CONS || \
+ (client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS)
+
+#define IPA_CLIENT_IS_Q6_PROD(client) \
+ ((client) == IPA_CLIENT_Q6_LAN_PROD || \
+ (client) == IPA_CLIENT_Q6_WAN_PROD || \
+ (client) == IPA_CLIENT_Q6_CMD_PROD || \
+ (client) == IPA_CLIENT_Q6_DECOMP_PROD || \
+ (client) == IPA_CLIENT_Q6_DECOMP2_PROD)
+
+#define IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client) \
+ ((client) == IPA_CLIENT_Q6_LAN_CONS || \
+ (client) == IPA_CLIENT_Q6_WAN_CONS || \
+ (client) == IPA_CLIENT_Q6_DUN_CONS || \
+ (client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS)
+
+#define IPA_CLIENT_IS_Q6_ZIP_CONS(client) \
+ ((client) == IPA_CLIENT_Q6_DECOMP_CONS || \
+ (client) == IPA_CLIENT_Q6_DECOMP2_CONS)
+
+#define IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client) \
+ ((client) == IPA_CLIENT_Q6_LAN_PROD || \
+ (client) == IPA_CLIENT_Q6_WAN_PROD || \
+ (client) == IPA_CLIENT_Q6_CMD_PROD)
+
+#define IPA_CLIENT_IS_Q6_ZIP_PROD(client) \
+ ((client) == IPA_CLIENT_Q6_DECOMP_PROD || \
+ (client) == IPA_CLIENT_Q6_DECOMP2_PROD)
+
+#define IPA_CLIENT_IS_MEMCPY_DMA_CONS(client) \
+ ((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS || \
+ (client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS)
+
+#define IPA_CLIENT_IS_MEMCPY_DMA_PROD(client) \
+ ((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_PROD || \
+ (client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD)
+
+#define IPA_CLIENT_IS_MHI_CONS(client) \
+ ((client) == IPA_CLIENT_MHI_CONS)
+
+#define IPA_CLIENT_IS_MHI(client) \
+ ((client) == IPA_CLIENT_MHI_CONS || \
+ (client) == IPA_CLIENT_MHI_PROD)
+
+#define IPA_CLIENT_IS_TEST_PROD(client) \
+ ((client) == IPA_CLIENT_TEST_PROD || \
+ (client) == IPA_CLIENT_TEST1_PROD || \
+ (client) == IPA_CLIENT_TEST2_PROD || \
+ (client) == IPA_CLIENT_TEST3_PROD || \
+ (client) == IPA_CLIENT_TEST4_PROD)
+
+#define IPA_CLIENT_IS_TEST_CONS(client) \
+ ((client) == IPA_CLIENT_TEST_CONS || \
+ (client) == IPA_CLIENT_TEST1_CONS || \
+ (client) == IPA_CLIENT_TEST2_CONS || \
+ (client) == IPA_CLIENT_TEST3_CONS || \
+ (client) == IPA_CLIENT_TEST4_CONS)
+
+#define IPA_CLIENT_IS_TEST(client) \
+ (IPA_CLIENT_IS_TEST_PROD(client) || IPA_CLIENT_IS_TEST_CONS(client))
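+
+/*
+ * Illustrative sketch: the classification macros above are plain predicate
+ * expressions over enum ipa_client_type, e.g.:
+ *
+ *	if (IPA_CLIENT_IS_WLAN_CONS(client))
+ *		... handle a WLAN consumer pipe ...
+ *	else if (IPA_CLIENT_IS_Q6_PROD(client))
+ *		... handle a modem (Q6) producer pipe ...
+ */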
+
+/**
+ * enum ipa_ip_type - Address family: IPv4 or IPv6
+ */
+enum ipa_ip_type {
+ IPA_IP_v4,
+ IPA_IP_v6,
+ IPA_IP_MAX
+};
+
+/**
+ * enum ipa_rule_type - Type of routing or filtering rule
+ * Hashable: Rule will be located in the hashable tables
+ * Non_Hashable: Rule will be located in the non-hashable tables
+ */
+enum ipa_rule_type {
+ IPA_RULE_HASHABLE,
+ IPA_RULE_NON_HASHABLE,
+ IPA_RULE_TYPE_MAX
+};
+
+/**
+ * enum ipa_flt_action - action field of filtering rule
+ *
+ * Pass to routing: 5'd0
+ * Pass to source NAT: 5'd1
+ * Pass to destination NAT: 5'd2
+ * Pass to default output pipe (e.g., Apps or Modem): 5'd3
+ */
+enum ipa_flt_action {
+ IPA_PASS_TO_ROUTING,
+ IPA_PASS_TO_SRC_NAT,
+ IPA_PASS_TO_DST_NAT,
+ IPA_PASS_TO_EXCEPTION
+};
+
+/**
+ * enum ipa_wlan_event - Events for wlan client
+ *
+ * wlan client connect: New wlan client connected
+ * wlan client disconnect: wlan client disconnected
+ * wlan client power save: wlan client moved to power save
+ * wlan client normal: wlan client moved out of power save
+ * sw routing enable: ipa routing is disabled
+ * sw routing disable: ipa routing is enabled
+ * wlan ap connect: wlan AP(access point) is up
+ * wlan ap disconnect: wlan AP(access point) is down
+ * wlan sta connect: wlan STA(station) is up
+ * wlan sta disconnect: wlan STA(station) is down
+ * wlan client connect ex: new wlan client connected
+ * wlan scc switch: wlan interfaces in scc mode
+ * wlan mcc switch: wlan interfaces in mcc mode
+ * wlan wdi enable: wdi data path completed
+ * wlan wdi disable: wdi data path teardown
+ */
+enum ipa_wlan_event {
+ WLAN_CLIENT_CONNECT,
+ WLAN_CLIENT_DISCONNECT,
+ WLAN_CLIENT_POWER_SAVE_MODE,
+ WLAN_CLIENT_NORMAL_MODE,
+ SW_ROUTING_ENABLE,
+ SW_ROUTING_DISABLE,
+ WLAN_AP_CONNECT,
+ WLAN_AP_DISCONNECT,
+ WLAN_STA_CONNECT,
+ WLAN_STA_DISCONNECT,
+ WLAN_CLIENT_CONNECT_EX,
+ WLAN_SWITCH_TO_SCC,
+ WLAN_SWITCH_TO_MCC,
+ WLAN_WDI_ENABLE,
+ WLAN_WDI_DISABLE,
+ IPA_WLAN_EVENT_MAX
+};
+
+/**
+ * enum ipa_wan_event - Events for wan client
+ *
+ * wan default route add/del
+ * wan embms connect: New wan embms interface connected
+ */
+enum ipa_wan_event {
+ WAN_UPSTREAM_ROUTE_ADD = IPA_WLAN_EVENT_MAX,
+ WAN_UPSTREAM_ROUTE_DEL,
+ WAN_EMBMS_CONNECT,
+ WAN_XLAT_CONNECT,
+ IPA_WAN_EVENT_MAX
+};
+
+enum ipa_ecm_event {
+ ECM_CONNECT = IPA_WAN_EVENT_MAX,
+ ECM_DISCONNECT,
+ IPA_ECM_EVENT_MAX,
+};
+
+enum ipa_tethering_stats_event {
+ IPA_TETHERING_STATS_UPDATE_STATS = IPA_ECM_EVENT_MAX,
+ IPA_TETHERING_STATS_UPDATE_NETWORK_STATS,
+ IPA_TETHERING_STATS_EVENT_MAX,
+ IPA_EVENT_MAX_NUM = IPA_TETHERING_STATS_EVENT_MAX
+};
+
+#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
+
+/**
+ * enum ipa_rm_resource_name - IPA RM clients identification names
+ *
+ * Add new mapping to ipa_rm_prod_index() / ipa_rm_cons_index()
+ * when adding new entry to this enum.
+ */
+enum ipa_rm_resource_name {
+ IPA_RM_RESOURCE_PROD = 0,
+ IPA_RM_RESOURCE_Q6_PROD = IPA_RM_RESOURCE_PROD,
+ IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ IPA_RM_RESOURCE_HSIC_PROD,
+ IPA_RM_RESOURCE_STD_ECM_PROD,
+ IPA_RM_RESOURCE_RNDIS_PROD,
+ IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_WLAN_PROD,
+ IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_PROD_MAX,
+
+ IPA_RM_RESOURCE_Q6_CONS = IPA_RM_RESOURCE_PROD_MAX,
+ IPA_RM_RESOURCE_USB_CONS,
+ IPA_RM_RESOURCE_USB_DPL_CONS,
+ IPA_RM_RESOURCE_HSIC_CONS,
+ IPA_RM_RESOURCE_WLAN_CONS,
+ IPA_RM_RESOURCE_APPS_CONS,
+ IPA_RM_RESOURCE_ODU_ADAPT_CONS,
+ IPA_RM_RESOURCE_MHI_CONS,
+ IPA_RM_RESOURCE_MAX
+};
+
+/**
+ * enum ipa_hw_type - IPA hardware version type
+ * @IPA_HW_None: IPA hardware version not defined
+ * @IPA_HW_v1_0: IPA hardware version 1.0
+ * @IPA_HW_v1_1: IPA hardware version 1.1
+ * @IPA_HW_v2_0: IPA hardware version 2.0
+ * @IPA_HW_v2_1: IPA hardware version 2.1
+ * @IPA_HW_v2_5: IPA hardware version 2.5
+ * @IPA_HW_v2_6: IPA hardware version 2.6
+ * @IPA_HW_v2_6L: IPA hardware version 2.6L
+ * @IPA_HW_v3_0: IPA hardware version 3.0
+ * @IPA_HW_v3_1: IPA hardware version 3.1
+ * @IPA_HW_v3_5: IPA hardware version 3.5
+ * @IPA_HW_v3_5_1: IPA hardware version 3.5.1
+ */
+enum ipa_hw_type {
+ IPA_HW_None = 0,
+ IPA_HW_v1_0 = 1,
+ IPA_HW_v1_1 = 2,
+ IPA_HW_v2_0 = 3,
+ IPA_HW_v2_1 = 4,
+ IPA_HW_v2_5 = 5,
+ IPA_HW_v2_6 = IPA_HW_v2_5,
+ IPA_HW_v2_6L = 6,
+ IPA_HW_v3_0 = 10,
+ IPA_HW_v3_1 = 11,
+ IPA_HW_v3_5 = 12,
+ IPA_HW_v3_5_1 = 13,
+ IPA_HW_MAX
+};
+
+/**
+ * struct ipa_rule_attrib - attributes of a routing/filtering
+ * rule, all in LE
+ * @attrib_mask: what attributes are valid
+ * @src_port_lo: low port of src port range
+ * @src_port_hi: high port of src port range
+ * @dst_port_lo: low port of dst port range
+ * @dst_port_hi: high port of dst port range
+ * @type: ICMP/IGMP type
+ * @code: ICMP/IGMP code
+ * @spi: IPSec SPI
+ * @src_port: exact src port
+ * @dst_port: exact dst port
+ * @meta_data: meta-data val
+ * @meta_data_mask: meta-data mask
+ * @u.v4.tos: type of service
+ * @u.v4.protocol: protocol
+ * @u.v4.src_addr: src address value
+ * @u.v4.src_addr_mask: src address mask
+ * @u.v4.dst_addr: dst address value
+ * @u.v4.dst_addr_mask: dst address mask
+ * @u.v6.tc: traffic class
+ * @u.v6.flow_label: flow label
+ * @u.v6.next_hdr: next header
+ * @u.v6.src_addr: src address val
+ * @u.v6.src_addr_mask: src address mask
+ * @u.v6.dst_addr: dst address val
+ * @u.v6.dst_addr_mask: dst address mask
+ */
+struct ipa_rule_attrib {
+ uint32_t attrib_mask;
+ uint16_t src_port_lo;
+ uint16_t src_port_hi;
+ uint16_t dst_port_lo;
+ uint16_t dst_port_hi;
+ uint8_t type;
+ uint8_t code;
+ uint8_t tos_value;
+ uint8_t tos_mask;
+ uint32_t spi;
+ uint16_t src_port;
+ uint16_t dst_port;
+ uint32_t meta_data;
+ uint32_t meta_data_mask;
+ uint8_t src_mac_addr[ETH_ALEN];
+ uint8_t src_mac_addr_mask[ETH_ALEN];
+ uint8_t dst_mac_addr[ETH_ALEN];
+ uint8_t dst_mac_addr_mask[ETH_ALEN];
+ uint16_t ether_type;
+ union {
+ struct {
+ uint8_t tos;
+ uint8_t protocol;
+ uint32_t src_addr;
+ uint32_t src_addr_mask;
+ uint32_t dst_addr;
+ uint32_t dst_addr_mask;
+ } v4;
+ struct {
+ uint8_t tc;
+ uint32_t flow_label;
+ uint8_t next_hdr;
+ uint32_t src_addr[4];
+ uint32_t src_addr_mask[4];
+ uint32_t dst_addr[4];
+ uint32_t dst_addr_mask[4];
+ } v6;
+ } u;
+};
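+
+/*
+ * Illustrative sketch: attrib_mask selects which fields are compared,
+ * using the IPA_FLT_* bits defined above. Matching IPv4 packets destined
+ * to 192.168.1.0/24 on ports 80-443 could be expressed as (addresses in
+ * the byte order the rule format expects, per the note above):
+ *
+ *	struct ipa_rule_attrib attrib;
+ *
+ *	memset(&attrib, 0, sizeof(attrib));
+ *	attrib.attrib_mask = IPA_FLT_DST_ADDR | IPA_FLT_DST_PORT_RANGE;
+ *	attrib.u.v4.dst_addr = 0xc0a80100;
+ *	attrib.u.v4.dst_addr_mask = 0xffffff00;
+ *	attrib.dst_port_lo = 80;
+ *	attrib.dst_port_hi = 443;
+ */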
+
+/*! @brief The maximum number of Mask Equal 32 Eqns */
+#define IPA_IPFLTR_NUM_MEQ_32_EQNS 2
+
+/*! @brief The maximum number of IHL offset Mask Equal 32 Eqns */
+#define IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS 2
+
+/*! @brief The maximum number of Mask Equal 128 Eqns */
+#define IPA_IPFLTR_NUM_MEQ_128_EQNS 2
+
+/*! @brief The maximum number of IHL offset Range Check 16 Eqns */
+#define IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS 2
+
+/*! @brief Offset and 16 bit comparison equation */
+struct ipa_ipfltr_eq_16 {
+ int8_t offset;
+ uint16_t value;
+};
+
+/*! @brief Offset and 32 bit comparison equation */
+struct ipa_ipfltr_eq_32 {
+ int8_t offset;
+ uint32_t value;
+};
+
+/*! @brief Offset and 128 bit masked comparison equation */
+struct ipa_ipfltr_mask_eq_128 {
+ int8_t offset;
+ uint8_t mask[16];
+ uint8_t value[16];
+};
+
+/*! @brief Offset and 32 bit masked comparison equation */
+struct ipa_ipfltr_mask_eq_32 {
+ int8_t offset;
+ uint32_t mask;
+ uint32_t value;
+};
+
+/*! @brief Equation for identifying a range. Ranges are inclusive */
+struct ipa_ipfltr_range_eq_16 {
+ int8_t offset;
+ uint16_t range_low;
+ uint16_t range_high;
+};
+
+/*! @brief Rule equations which are set according to DS filter installation */
+struct ipa_ipfltri_rule_eq {
+ /*! 16-bit Bitmask to indicate how many eqs are valid in this rule */
+ uint16_t rule_eq_bitmap;
+ /*! Specifies if a type of service check rule is present */
+ uint8_t tos_eq_present;
+ /*! The value to check against the type of service (ipv4) field */
+ uint8_t tos_eq;
+ /*! Specifies if a protocol check rule is present */
+ uint8_t protocol_eq_present;
+	/*! The value to check against the protocol (ipv4) field */
+ uint8_t protocol_eq;
+ /*! The number of ip header length offset 16 bit range check
+ * rules in this rule
+ */
+ uint8_t num_ihl_offset_range_16;
+ /*! An array of the registered ip header length offset 16 bit
+ * range check rules
+ */
+ struct ipa_ipfltr_range_eq_16
+ ihl_offset_range_16[IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS];
+ /*! The number of mask equal 32 rules present in this rule */
+ uint8_t num_offset_meq_32;
+ /*! An array of all the possible mask equal 32 rules in this rule */
+ struct ipa_ipfltr_mask_eq_32
+ offset_meq_32[IPA_IPFLTR_NUM_MEQ_32_EQNS];
+ /*! Specifies if the traffic class rule is present in this rule */
+ uint8_t tc_eq_present;
+	/*! The value to check the traffic class (ipv6) field against */
+ uint8_t tc_eq;
+ /*! Specifies if the flow equals rule is present in this rule */
+ uint8_t fl_eq_present;
+ /*! The value to check the flow (ipv6) field against */
+ uint32_t fl_eq;
+	/*! Specifies if the ip header length offset 16 bit equation is
+	 * present in this rule
+	 */
+ uint8_t ihl_offset_eq_16_present;
+ /*! The ip header length offset 16 bit equation */
+ struct ipa_ipfltr_eq_16 ihl_offset_eq_16;
+	/*! Specifies if the ip header length offset 32 bit equation is
+	 * present in this rule
+	 */
+ uint8_t ihl_offset_eq_32_present;
+ /*! The ip header length offset 32 bit equation */
+ struct ipa_ipfltr_eq_32 ihl_offset_eq_32;
+ /*! The number of ip header length offset 32 bit mask equations in
+ * this rule
+ */
+ uint8_t num_ihl_offset_meq_32;
+	/*! The ip header length offset 32 bit mask equations */
+ struct ipa_ipfltr_mask_eq_32
+ ihl_offset_meq_32[IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS];
+	/*! The number of 128 bit masked comparison equations in this
+	 * rule
+	 */
+ uint8_t num_offset_meq_128;
+	/*! The 128 bit masked comparison equations */
+ struct ipa_ipfltr_mask_eq_128
+ offset_meq_128[IPA_IPFLTR_NUM_MEQ_128_EQNS];
+ /*! The metadata 32 bit masked comparison equation present or not */
+ /* Metadata based rules are added internally by IPA driver */
+ uint8_t metadata_meq32_present;
+ /*! The metadata 32 bit masked comparison equation */
+ struct ipa_ipfltr_mask_eq_32 metadata_meq32;
+ /*! Specifies if the Fragment equation is present in this rule */
+ uint8_t ipv4_frag_eq_present;
+};
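+
+/*
+ * Illustrative sketch: the same destination-port-range check expressed at
+ * the equation level fills one ihl-offset range entry (offset 2 from the
+ * end of the IP header is the TCP/UDP destination port). The matching bit
+ * in rule_eq_bitmap is hardware-defined and is not spelled out here.
+ *
+ *	struct ipa_ipfltri_rule_eq eq;
+ *
+ *	memset(&eq, 0, sizeof(eq));
+ *	eq.num_ihl_offset_range_16 = 1;
+ *	eq.ihl_offset_range_16[0].offset = 2;
+ *	eq.ihl_offset_range_16[0].range_low = 80;
+ *	eq.ihl_offset_range_16[0].range_high = 443;
+ *	... set the corresponding bit in eq.rule_eq_bitmap per the HW spec ...
+ */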
+
+/**
+ * struct ipa_flt_rule - attributes of a filtering rule
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ * the header removed as part of header removal
+ * @to_uc: bool switch to pass packet to micro-controller
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ * @eq_attrib: attributes of the rule in equation form (valid when
+ * eq_attrib_type is true)
+ * @rt_tbl_idx: index of RT table referred to by filter rule (valid when
+ * eq_attrib_type is true and non-exception action)
+ * @eq_attrib_type: true if equation level form used to specify attributes
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ * IPA will use the rule and will not look for other rules that may have
+ * higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ * ipa uses hashable rules to cache their hit results to be used in
+ * consecutive packets
+ * @rule_id: rule_id to be assigned to the filter rule. If the client
+ * specifies rule_id as 0, the driver will assign a new rule_id
+ */
+struct ipa_flt_rule {
+ uint8_t retain_hdr;
+ uint8_t to_uc;
+ enum ipa_flt_action action;
+ uint32_t rt_tbl_hdl;
+ struct ipa_rule_attrib attrib;
+ struct ipa_ipfltri_rule_eq eq_attrib;
+ uint32_t rt_tbl_idx;
+ uint8_t eq_attrib_type;
+ uint8_t max_prio;
+ uint8_t hashable;
+ uint16_t rule_id;
+};
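+
+/*
+ * Illustrative sketch: a complete filter rule wraps an attribute block
+ * (such as the one built in the sketch above) with an action and placement
+ * hints; rt_tbl_hdl is assumed to come from a prior routing-table "get".
+ *
+ *	struct ipa_flt_rule rule;
+ *
+ *	memset(&rule, 0, sizeof(rule));
+ *	rule.action = IPA_PASS_TO_ROUTING;
+ *	rule.rt_tbl_hdl = rt_tbl_hdl;
+ *	rule.hashable = 1;
+ *	rule.rule_id = 0;	(0: let the driver assign a rule_id)
+ *	rule.attrib = attrib;
+ */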
+
+/**
+ * enum ipa_hdr_l2_type - L2 header type
+ * IPA_HDR_L2_NONE: L2 header which isn't Ethernet II and isn't 802_3
+ * IPA_HDR_L2_ETHERNET_II: L2 header of type Ethernet II
+ * IPA_HDR_L2_802_3: L2 header of type 802_3
+ */
+enum ipa_hdr_l2_type {
+ IPA_HDR_L2_NONE,
+ IPA_HDR_L2_ETHERNET_II,
+ IPA_HDR_L2_802_3,
+ IPA_HDR_L2_MAX,
+};
+
+/**
+ * enum ipa_hdr_proc_type - Processing context type
+ * IPA_HDR_PROC_NONE: No processing context
+ * IPA_HDR_PROC_ETHII_TO_ETHII: Process Ethernet II to Ethernet II
+ * IPA_HDR_PROC_ETHII_TO_802_3: Process Ethernet II to 802_3
+ * IPA_HDR_PROC_802_3_TO_ETHII: Process 802_3 to Ethernet II
+ * IPA_HDR_PROC_802_3_TO_802_3: Process 802_3 to 802_3
+ */
+enum ipa_hdr_proc_type {
+ IPA_HDR_PROC_NONE,
+ IPA_HDR_PROC_ETHII_TO_ETHII,
+ IPA_HDR_PROC_ETHII_TO_802_3,
+ IPA_HDR_PROC_802_3_TO_ETHII,
+ IPA_HDR_PROC_802_3_TO_802_3,
+ IPA_HDR_PROC_MAX,
+};
+
+/**
+ * struct ipa_rt_rule - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header,
+ *	it is not an index or an offset
+ * @hdr_proc_ctx_hdl: handle to header processing context. if it is provided
+ *	hdr_hdl shall be 0
+ * @attrib: attributes of the rule
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ * IPA will use the rule and will not look for other rules that may have
+ * higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ * ipa uses hashable rules to cache their hit results to be used in
+ * consecutive packets
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ * the header removed as part of header removal
+ */
+struct ipa_rt_rule {
+ enum ipa_client_type dst;
+ uint32_t hdr_hdl;
+ uint32_t hdr_proc_ctx_hdl;
+ struct ipa_rule_attrib attrib;
+ uint8_t max_prio;
+ uint8_t hashable;
+ uint8_t retain_hdr;
+};
+
+/**
+ * struct ipa_hdr_add - header descriptor includes in and out
+ * parameters
+ * @name: name of the header
+ * @hdr: actual header to be inserted
+ * @hdr_len: size of above header
+ * @type: l2 header type
+ * @is_partial: header not fully specified
+ * @hdr_hdl: out parameter, handle to header, valid when status is 0
+ * @status: out parameter, status of header add operation,
+ * 0 for success,
+ * -1 for failure
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_hdr_add {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t hdr[IPA_HDR_MAX_SIZE];
+ uint8_t hdr_len;
+ enum ipa_hdr_l2_type type;
+ uint8_t is_partial;
+ uint32_t hdr_hdl;
+ int status;
+ uint8_t is_eth2_ofst_valid;
+ uint16_t eth2_ofst;
+};
+
+/**
+ * struct ipa_ioc_add_hdr - header addition parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be written to IPA HW also?
+ * @num_hdrs: num of headers that follow
+ * @ipa_hdr_add hdr: all headers need to go here back to
+ * back, no pointers
+ */
+struct ipa_ioc_add_hdr {
+ uint8_t commit;
+ uint8_t num_hdrs;
+ struct ipa_hdr_add hdr[0];
+};
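+
+/*
+ * Illustrative user-space sketch: hdr[0] is a flexible array, so the ioctl
+ * payload is allocated as one block sized for num_hdrs entries. The
+ * IPA_IOC_ADD_HDR ioctl code (built from IPA_IOC_MAGIC and
+ * IPA_IOCTL_ADD_HDR) is assumed to be defined later in this header; fd is
+ * an open IPA device descriptor and hdr_bytes/hdr_len the raw header.
+ *
+ *	struct ipa_ioc_add_hdr *cmd;
+ *	size_t sz = sizeof(*cmd) + 1 * sizeof(struct ipa_hdr_add);
+ *
+ *	cmd = calloc(1, sz);
+ *	cmd->commit = 1;
+ *	cmd->num_hdrs = 1;
+ *	strncpy(cmd->hdr[0].name, "my_hdr", IPA_RESOURCE_NAME_MAX - 1);
+ *	memcpy(cmd->hdr[0].hdr, hdr_bytes, hdr_len);
+ *	cmd->hdr[0].hdr_len = hdr_len;
+ *	if (ioctl(fd, IPA_IOC_ADD_HDR, cmd) == 0 && cmd->hdr[0].status == 0)
+ *		hdr_hdl = cmd->hdr[0].hdr_hdl;
+ */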
+
+/**
+ * struct ipa_hdr_proc_ctx_add - processing context descriptor includes
+ * in and out parameters
+ * @type: processing context type
+ * @hdr_hdl: in parameter, handle to header
+ * @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0
+ * @status: out parameter, status of header add operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_hdr_proc_ctx_add {
+ enum ipa_hdr_proc_type type;
+ uint32_t hdr_hdl;
+ uint32_t proc_ctx_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_hdr_proc_ctx - processing context addition
+ * parameters (support multiple processing contexts and commit)
+ * @commit: should processing contexts be written to IPA HW also?
+ * @num_proc_ctxs: num of processing contexts that follow
+ * @proc_ctx: all processing contexts need to go here back to
+ * back, no pointers
+ */
+struct ipa_ioc_add_hdr_proc_ctx {
+ uint8_t commit;
+ uint8_t num_proc_ctxs;
+ struct ipa_hdr_proc_ctx_add proc_ctx[0];
+};
+
+/**
+ * struct ipa_ioc_copy_hdr - retrieve a copy of the specified
+ * header - caller can then derive the complete header
+ * @name: name of the header resource
+ * @hdr: out parameter, contents of specified header,
+ * valid only when ioctl return val is non-negative
+ * @hdr_len: out parameter, size of above header
+ * valid only when ioctl return val is non-negative
+ * @type: l2 header type
+ * valid only when ioctl return val is non-negative
+ * @is_partial: out parameter, indicates whether specified header is partial
+ * valid only when ioctl return val is non-negative
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_ioc_copy_hdr {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t hdr[IPA_HDR_MAX_SIZE];
+ uint8_t hdr_len;
+ enum ipa_hdr_l2_type type;
+ uint8_t is_partial;
+ uint8_t is_eth2_ofst_valid;
+ uint16_t eth2_ofst;
+};
+
+/**
+ * struct ipa_ioc_get_hdr - header entry lookup parameters; if the lookup is
+ * successful, the caller must call put to release the reference when done
+ * @name: name of the header resource
+ * @hdl: out parameter, handle of header entry
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_get_hdr {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t hdl;
+};
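+
+/*
+ * Example (illustrative): looking up a header by name and releasing the
+ * reference when done; same includes and fd as the add-header example
+ * above. Based on the _IOW(..., uint32_t) encoding of IPA_IOC_PUT_HDR
+ * defined below, the handle is assumed to be passed by value.
+ * "example_hdr" is a placeholder.
+ *
+ *   struct ipa_ioc_get_hdr lookup;
+ *
+ *   memset(&lookup, 0, sizeof(lookup));
+ *   strncpy(lookup.name, "example_hdr", IPA_RESOURCE_NAME_MAX - 1);
+ *   if (ioctl(fd, IPA_IOC_GET_HDR, &lookup) < 0)
+ *       return -1;
+ *   // ... use lookup.hdl ...
+ *   ioctl(fd, IPA_IOC_PUT_HDR, lookup.hdl);   // drop the reference
+ */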
+
+/**
+ * struct ipa_hdr_del - header descriptor includes in and out
+ * parameters
+ *
+ * @hdl: handle returned from header add operation
+ * @status: out parameter, status of header remove operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_hdr_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr - header deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should headers be removed from IPA HW also?
+ * @num_hdls: num of headers being removed
+ * @hdl: all handles need to go here back to back, no pointers
+ */
+struct ipa_ioc_del_hdr {
+ uint8_t commit;
+ uint8_t num_hdls;
+ struct ipa_hdr_del hdl[0];
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_del - processing context descriptor includes
+ * in and out parameters
+ * @hdl: handle returned from processing context add operation
+ * @status: out parameter, status of header remove operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_hdr_proc_ctx_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr_proc_ctx - processing context deletion parameters
+ * (supports multiple processing contexts and commit)
+ * @commit: should processing contexts be removed from IPA HW also?
+ * @num_hdls: num of processing contexts being removed
+ * @hdl: all handles need to go here back to back,
+ * no pointers
+ */
+struct ipa_ioc_del_hdr_proc_ctx {
+ uint8_t commit;
+ uint8_t num_hdls;
+ struct ipa_hdr_proc_ctx_del hdl[0];
+};
+
+/**
+ * struct ipa_rt_rule_add - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of routing table; note that it is NOT possible to
+ * add rules at the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of routing rule add operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_rt_rule_add {
+ struct ipa_rt_rule rule;
+ uint8_t at_rear;
+ uint32_t rt_rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t num_rules;
+ struct ipa_rt_rule_add rules[0];
+};
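+
+/*
+ * Example (illustrative): appending one rule to a named IPv4 routing table;
+ * same includes and fd as the add-header example above. IPA_IP_v4 and
+ * IPA_CLIENT_USB_CONS are defined earlier in this header; the table name is
+ * a placeholder. The rule attributes are left zeroed here, on the
+ * assumption that an empty attribute mask acts as a catch-all match.
+ *
+ *   struct ipa_ioc_add_rt_rule *req;
+ *   uint32_t hdl = 0;
+ *
+ *   req = calloc(1, sizeof(*req) + sizeof(struct ipa_rt_rule_add));
+ *   if (!req)
+ *       return -1;
+ *   req->commit = 1;
+ *   req->ip = IPA_IP_v4;
+ *   strncpy(req->rt_tbl_name, "example.tbl", IPA_RESOURCE_NAME_MAX - 1);
+ *   req->num_rules = 1;
+ *   req->rules[0].at_rear = 1;
+ *   req->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
+ *   if (!ioctl(fd, IPA_IOC_ADD_RT_RULE, req) && !req->rules[0].status)
+ *       hdl = req->rules[0].rt_rule_hdl;   // valid when status is 0
+ *   free(req);
+ */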
+
+/**
+ * struct ipa_ioc_add_rt_rule_after - routing rule addition after a specific
+ * rule parameters (supports multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @add_after_hdl: the rules will be added after this specific rule
+ * @rules: all rules need to go back to back here, no pointers
+ * at_rear field will be ignored when using this IOCTL
+ */
+struct ipa_ioc_add_rt_rule_after {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t num_rules;
+ uint32_t add_after_hdl;
+ struct ipa_rt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_rt_rule_mdfy - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @rt_rule_hdl: handle to the rule to be modified
+ * @status: output parameter, status of routing rule modify operation,
+ * 0 for success,
+ * -1 for failure
+ *
+ */
+struct ipa_rt_rule_mdfy {
+ struct ipa_rt_rule rule;
+ uint32_t rt_rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_mdfy_rt_rule - routing rule modify parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of routing rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_rt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_rules;
+ struct ipa_rt_rule_mdfy rules[0];
+};
+
+/**
+ * struct ipa_rt_rule_del - routing rule descriptor includes in
+ * and out parameters
+ * @hdl: handle returned from route rule add operation
+ * @status: output parameter, status of route rule delete operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_rt_rule_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_rt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_hdls;
+ struct ipa_rt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl_indx - routing table index lookup parameters
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @idx: output parameter, routing table index, valid only when ioctl
+ * return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl_indx {
+ enum ipa_ip_type ip;
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t idx;
+};
+
+/**
+ * struct ipa_flt_rule_add - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of filtering rule add operation,
+ * 0 for success,
+ * -1 for failure
+ *
+ */
+struct ipa_flt_rule_add {
+ struct ipa_flt_rule rule;
+ uint8_t at_rear;
+ uint32_t flt_rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
+ * multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep: which "clients" pipe does this rule apply to?
+ * valid only when global is 0
+ * @global: does this apply to global filter table of specific IP family
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ enum ipa_client_type ep;
+ uint8_t global;
+ uint8_t num_rules;
+ struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule_after - filtering rule addition after specific
+ * rule parameters (supports multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep: which "clients" pipe does this rule apply to?
+ * @num_rules: number of filtering rules that follow
+ * @add_after_hdl: rules will be added after the rule with this handle
+ * @rules: all rules need to go back to back here, no pointers. the at_rear
+ * field is ignored when using this IOCTL
+ */
+struct ipa_ioc_add_flt_rule_after {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ enum ipa_client_type ep;
+ uint8_t num_rules;
+ uint32_t add_after_hdl;
+ struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_mdfy - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @rule_hdl: handle to the rule to be modified
+ * @status: output parameter, status of filtering rule modify operation,
+ * 0 for success,
+ * -1 for failure
+ *
+ */
+struct ipa_flt_rule_mdfy {
+ struct ipa_flt_rule rule;
+ uint32_t rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_mdfy_flt_rule - filtering rule modify parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_flt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_rules;
+ struct ipa_flt_rule_mdfy rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_del - filtering rule descriptor includes
+ * in and out parameters
+ *
+ * @hdl: handle returned from filtering rule add operation
+ * @status: output parameter, status of filtering rule delete operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_flt_rule_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_flt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_hdls;
+ struct ipa_flt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl - routing table lookup parameters; if the lookup
+ * is successful, the caller must call put to release the reference when done
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @hdl: output parameter, handle of routing table, valid only when ioctl
+ * return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl {
+ enum ipa_ip_type ip;
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t hdl;
+};
+
+/**
+ * struct ipa_ioc_query_intf - used to look up the number of tx and
+ * rx properties of an interface
+ * @name: name of interface
+ * @num_tx_props: output parameter, number of tx properties
+ * valid only when ioctl return val is non-negative
+ * @num_rx_props: output parameter, number of rx properties
+ * valid only when ioctl return val is non-negative
+ * @num_ext_props: output parameter, number of ext properties
+ * valid only when ioctl return val is non-negative
+ * @excp_pipe: exception packets of this interface should be
+ * routed to this pipe
+ */
+struct ipa_ioc_query_intf {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t num_tx_props;
+ uint32_t num_rx_props;
+ uint32_t num_ext_props;
+ enum ipa_client_type excp_pipe;
+};
+
+/**
+ * struct ipa_ioc_tx_intf_prop - interface tx property
+ * @ip: IP family of routing rule
+ * @attrib: routing rule
+ * @dst_pipe: routing output pipe
+ * @alt_dst_pipe: alternate routing output pipe
+ * @hdr_name: name of associated header if any, empty string when no header
+ * @hdr_l2_type: type of associated header if any, use NONE when no header
+ */
+struct ipa_ioc_tx_intf_prop {
+ enum ipa_ip_type ip;
+ struct ipa_rule_attrib attrib;
+ enum ipa_client_type dst_pipe;
+ enum ipa_client_type alt_dst_pipe;
+ char hdr_name[IPA_RESOURCE_NAME_MAX];
+ enum ipa_hdr_l2_type hdr_l2_type;
+};
+
+/**
+ * struct ipa_ioc_query_intf_tx_props - interface tx properties
+ * @name: name of interface
+ * @num_tx_props: number of TX properties
+ * @tx: output parameter, the tx properties go here back to back
+ */
+struct ipa_ioc_query_intf_tx_props {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t num_tx_props;
+ struct ipa_ioc_tx_intf_prop tx[0];
+};
+
+/**
+ * struct ipa_ioc_ext_intf_prop - interface extended property
+ * @ip: IP family of routing rule
+ * @eq_attrib: attributes of the rule in equation form
+ * @action: action field
+ * @rt_tbl_idx: index of RT table referred to by filter rule
+ * @mux_id: MUX_ID
+ * @filter_hdl: handle of filter (as specified by provider of filter rule)
+ * @is_xlat_rule: is this an xlat flt rule or not?
+ * @rule_id: id of the rule
+ * @is_rule_hashable: bool switch. is this rule hashable or not?
+ */
+struct ipa_ioc_ext_intf_prop {
+ enum ipa_ip_type ip;
+ struct ipa_ipfltri_rule_eq eq_attrib;
+ enum ipa_flt_action action;
+ uint32_t rt_tbl_idx;
+ uint8_t mux_id;
+ uint32_t filter_hdl;
+ uint8_t is_xlat_rule;
+ uint32_t rule_id;
+ uint8_t is_rule_hashable;
+};
+
+/**
+ * struct ipa_ioc_query_intf_ext_props - interface ext properties
+ * @name: name of interface
+ * @num_ext_props: number of EXT properties
+ * @ext: output parameter, the ext properties go here back to back
+ */
+struct ipa_ioc_query_intf_ext_props {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t num_ext_props;
+ struct ipa_ioc_ext_intf_prop ext[0];
+};
+
+/**
+ * struct ipa_ioc_rx_intf_prop - interface rx property
+ * @ip: IP family of filtering rule
+ * @attrib: filtering rule
+ * @src_pipe: input pipe
+ * @hdr_l2_type: type of associated header if any, use NONE when no header
+ */
+struct ipa_ioc_rx_intf_prop {
+ enum ipa_ip_type ip;
+ struct ipa_rule_attrib attrib;
+ enum ipa_client_type src_pipe;
+ enum ipa_hdr_l2_type hdr_l2_type;
+};
+
+/**
+ * struct ipa_ioc_query_intf_rx_props - interface rx properties
+ * @name: name of interface
+ * @num_rx_props: number of RX properties
+ * @rx: output parameter, the rx properties go here back to back
+ */
+struct ipa_ioc_query_intf_rx_props {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t num_rx_props;
+ struct ipa_ioc_rx_intf_prop rx[0];
+};
+
+/**
+ * struct ipa_ioc_nat_alloc_mem - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem {
+ char dev_name[IPA_RESOURCE_NAME_MAX];
+ size_t size;
+ off_t offset;
+};
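+
+/*
+ * Example (illustrative): allocating NAT table memory; same includes and fd
+ * as the add-header example above. Only the ioctl itself is shown; the
+ * table is then mapped via mmap() on the device node the driver exposes for
+ * dev_name, with mem.offset locating the table within the mapped page when
+ * system memory was used (an assumption about the driver's behavior;
+ * "libipanat" is a placeholder name).
+ *
+ *   struct ipa_ioc_nat_alloc_mem mem;
+ *
+ *   memset(&mem, 0, sizeof(mem));
+ *   strncpy(mem.dev_name, "libipanat", IPA_RESOURCE_NAME_MAX - 1);
+ *   mem.size = 4096;                      // table size in bytes
+ *   if (ioctl(fd, IPA_IOC_ALLOC_NAT_MEM, &mem) < 0)
+ *       return -1;
+ *   // mem.offset is now valid for the subsequent mmap()
+ */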
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization
+ * parameters
+ * @tbl_index: input parameter, index of the table
+ * @ipv4_rules_offset: input parameter, ipv4 rules address offset
+ * @expn_rules_offset: input parameter, ipv4 expansion rules address offset
+ * @index_offset: input parameter, index rules offset
+ * @index_expn_offset: input parameter, index expansion rules offset
+ * @table_entries: input parameter, ipv4 rules table size in entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table size
+ * @ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_init {
+ uint8_t tbl_index;
+ uint32_t ipv4_rules_offset;
+ uint32_t expn_rules_offset;
+
+ uint32_t index_offset;
+ uint32_t index_expn_offset;
+
+ uint16_t table_entries;
+ uint16_t expn_table_entries;
+ uint32_t ip_addr;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_del - nat table delete parameter
+ * @table_index: input parameter, index of the table
+ * @public_ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_del {
+ uint8_t table_index;
+ uint32_t public_ip_addr;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat dma command parameter
+ * @table_index: input parameter, index of the table
+ * @base_addr: type of table, from which the base address of the table
+ * can be inferred
+ * @offset: destination offset within the NAT table
+ * @data: data to be written.
+ */
+struct ipa_ioc_nat_dma_one {
+ uint8_t table_index;
+ uint8_t base_addr;
+
+ uint32_t offset;
+ uint16_t data;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands
+ * @entries: number of dma commands in use
+ * @dma: data pointer to the dma commands
+ */
+struct ipa_ioc_nat_dma_cmd {
+ uint8_t entries;
+ struct ipa_ioc_nat_dma_one dma[0];
+};
+
+/**
+ * struct ipa_msg_meta - Format of the message meta-data.
+ * @msg_type: the type of the message
+ * @rsvd: reserved bits for future use.
+ * @msg_len: the length of the message in bytes
+ *
+ * For push model:
+ * Client in user-space should issue a read on the device (/dev/ipa) with a
+ * sufficiently large buffer in a continuous loop; the call will block when
+ * there is no message to read. Upon return, the client can read the
+ * ipa_msg_meta from the start of the buffer to find out the type and length
+ * of the message.
+ * size of buffer supplied >= (size of largest message + size of metadata)
+ *
+ * For pull model:
+ * Client in user-space can also issue a pull msg IOCTL to device (/dev/ipa)
+ * with a payload containing space for the ipa_msg_meta and the message specific
+ * payload length.
+ * size of buffer supplied == (len of specific message + size of metadata)
+ */
+struct ipa_msg_meta {
+ uint8_t msg_type;
+ uint8_t rsvd;
+ uint16_t msg_len;
+};
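+
+/*
+ * Example (illustrative): the push-model read loop described above. The
+ * buffer size is an assumption that must cover the largest message plus
+ * the meta-data.
+ *
+ *   #include <stdio.h>
+ *   #include <fcntl.h>
+ *   #include <unistd.h>
+ *   #include <linux/msm_ipa.h>
+ *
+ *   void ipa_msg_loop(void)
+ *   {
+ *       char buf[4096];    // assumed >= largest message + meta-data
+ *       struct ipa_msg_meta *meta;
+ *       ssize_t n;
+ *       int fd = open("/dev/ipa", O_RDONLY);
+ *
+ *       if (fd < 0)
+ *           return;
+ *       for (;;) {
+ *           n = read(fd, buf, sizeof(buf));   // blocks if no message
+ *           if (n < (ssize_t)sizeof(*meta))
+ *               break;
+ *           meta = (struct ipa_msg_meta *)buf;
+ *           // message-specific payload follows the meta-data
+ *           printf("msg type %d len %d\n", meta->msg_type, meta->msg_len);
+ *       }
+ *       close(fd);
+ *   }
+ */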
+
+/**
+ * struct ipa_wlan_msg - To hold information about wlan client
+ * @name: name of the wlan interface
+ * @mac_addr: mac address of wlan client
+ *
+ * wlan drivers need to pass the name of the wlan iface and the mac address
+ * of the wlan client along with ipa_wlan_event, whenever a wlan client is
+ * connected/disconnected or moves into/out of power save
+ */
+struct ipa_wlan_msg {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t mac_addr[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * enum ipa_wlan_hdr_attrib_type - attribute type
+ * in wlan client header
+ *
+ * WLAN_HDR_ATTRIB_MAC_ADDR: attrib type mac address
+ * WLAN_HDR_ATTRIB_STA_ID: attrib type station id
+ */
+enum ipa_wlan_hdr_attrib_type {
+ WLAN_HDR_ATTRIB_MAC_ADDR,
+ WLAN_HDR_ATTRIB_STA_ID
+};
+
+/**
+ * struct ipa_wlan_hdr_attrib_val - header attribute value
+ * @attrib_type: type of attribute
+ * @offset: offset of attribute within header
+ * @u.mac_addr: mac address
+ * @u.sta_id: station id
+ */
+struct ipa_wlan_hdr_attrib_val {
+ enum ipa_wlan_hdr_attrib_type attrib_type;
+ uint8_t offset;
+ union {
+ uint8_t mac_addr[IPA_MAC_ADDR_SIZE];
+ uint8_t sta_id;
+ } u;
+};
+
+/**
+ * struct ipa_wlan_msg_ex - To hold information about wlan client
+ * @name: name of the wlan interface
+ * @num_of_attribs: number of attributes
+ * @attrib_val: holds attribute values
+ *
+ * wlan drivers need to pass the name of the wlan iface and the mac address
+ * or station id of the wlan client along with ipa_wlan_event,
+ * whenever a wlan client is connected/disconnected or moves into/out of
+ * power save
+ */
+struct ipa_wlan_msg_ex {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t num_of_attribs;
+ struct ipa_wlan_hdr_attrib_val attribs[0];
+};
+
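+/**
+ * struct ipa_ecm_msg - To hold information about ecm client
+ * @name: name of the ecm interface
+ * @ifindex: interface index of the ecm interface
+ */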
+struct ipa_ecm_msg {
+ char name[IPA_RESOURCE_NAME_MAX];
+ int ifindex;
+};
+
+/**
+ * struct ipa_wan_msg - To hold information about wan client
+ * @upstream_ifname: name of the upstream wan interface
+ * @tethered_ifname: name of the tethered interface
+ * @ip: IP family
+ * @ipv4_addr_gw: ipv4 address of the gateway
+ * @ipv6_addr_gw: ipv6 address of the gateway
+ *
+ * CnE needs to pass the name of the default wan iface when connected or
+ * disconnected. CnE needs to pass the gw info in wlan AP+STA mode.
+ * netmgr needs to pass the name of the wan eMBMS iface when connected.
+ */
+struct ipa_wan_msg {
+ char upstream_ifname[IPA_RESOURCE_NAME_MAX];
+ char tethered_ifname[IPA_RESOURCE_NAME_MAX];
+ enum ipa_ip_type ip;
+ uint32_t ipv4_addr_gw;
+ uint32_t ipv6_addr_gw[IPA_WAN_MSG_IPv6_ADDR_GW_LEN];
+};
+
+/**
+ * struct ipa_ioc_rm_dependency - parameters for add/delete dependency
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ */
+struct ipa_ioc_rm_dependency {
+ enum ipa_rm_resource_name resource_name;
+ enum ipa_rm_resource_name depends_on_name;
+};
+
+struct ipa_ioc_generate_flt_eq {
+ enum ipa_ip_type ip;
+ struct ipa_rule_attrib attrib;
+ struct ipa_ipfltri_rule_eq eq_attrib;
+};
+
+/**
+ * struct ipa_ioc_write_qmapid - to write mux id to endpoint meta register
+ * @client: client type of the endpoint whose meta register is written
+ * @qmap_id: mux id of wan
+ */
+struct ipa_ioc_write_qmapid {
+ enum ipa_client_type client;
+ uint8_t qmap_id;
+};
+
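+/**
+ * enum ipacm_client_enum - enumeration of IPACM client types (USB / WLAN)
+ */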
+enum ipacm_client_enum {
+ IPACM_CLIENT_USB = 1,
+ IPACM_CLIENT_WLAN,
+ IPACM_CLIENT_MAX
+};
+
+/*
+ * Actual IOCTLs supported by the IPA driver
+ */
+#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR, \
+ struct ipa_ioc_add_hdr *)
+#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR, \
+ struct ipa_ioc_del_hdr *)
+#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE, \
+ struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE_AFTER, \
+ struct ipa_ioc_add_rt_rule_after *)
+#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_RT_RULE, \
+ struct ipa_ioc_del_rt_rule *)
+#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_FLT_RULE, \
+ struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_ADD_FLT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_FLT_RULE_AFTER, \
+ struct ipa_ioc_add_flt_rule_after *)
+#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_FLT_RULE, \
+ struct ipa_ioc_del_flt_rule *)
+#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_COMMIT_HDR)
+#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_RESET_HDR)
+#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COMMIT_RT, \
+ enum ipa_ip_type)
+#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RESET_RT, \
+ enum ipa_ip_type)
+#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COMMIT_FLT, \
+ enum ipa_ip_type)
+#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RESET_FLT, \
+ enum ipa_ip_type)
+#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DUMP)
+#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_RT_TBL, \
+ struct ipa_ioc_get_rt_tbl *)
+#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PUT_RT_TBL, \
+ uint32_t)
+#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COPY_HDR, \
+ struct ipa_ioc_copy_hdr *)
+#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF, \
+ struct ipa_ioc_query_intf *)
+#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+ struct ipa_ioc_query_intf_tx_props *)
+#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+ struct ipa_ioc_query_intf_rx_props *)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+ struct ipa_ioc_query_intf_ext_props *)
+#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_HDR, \
+ struct ipa_ioc_get_hdr *)
+#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PUT_HDR, \
+ uint32_t)
+#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_MEM, \
+ struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_INIT_NAT, \
+ struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NAT_DMA, \
+ struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_DEL_NAT, \
+ struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_NAT_OFFSET, \
+ uint32_t *)
+#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_SET_FLT, \
+ uint32_t)
+#define IPA_IOC_PULL_MSG _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PULL_MSG, \
+ struct ipa_msg_meta *)
+#define IPA_IOC_RM_ADD_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RM_ADD_DEPENDENCY, \
+ struct ipa_ioc_rm_dependency *)
+#define IPA_IOC_RM_DEL_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RM_DEL_DEPENDENCY, \
+ struct ipa_ioc_rm_dependency *)
+#define IPA_IOC_GENERATE_FLT_EQ _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GENERATE_FLT_EQ, \
+ struct ipa_ioc_generate_flt_eq *)
+#define IPA_IOC_QUERY_EP_MAPPING _IOR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_EP_MAPPING, \
+ uint32_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+ struct ipa_ioc_get_rt_tbl_indx *)
+#define IPA_IOC_WRITE_QMAPID _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_WRITE_QMAPID, \
+ struct ipa_ioc_write_qmapid *)
+#define IPA_IOC_MDFY_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_MDFY_FLT_RULE, \
+ struct ipa_ioc_mdfy_flt_rule *)
+#define IPA_IOC_MDFY_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_MDFY_RT_RULE, \
+ struct ipa_ioc_mdfy_rt_rule *)
+
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+ struct ipa_wan_msg *)
+
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+ struct ipa_wan_msg *)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+ struct ipa_wan_msg *)
+#define IPA_IOC_ADD_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR_PROC_CTX, \
+ struct ipa_ioc_add_hdr_proc_ctx *)
+#define IPA_IOC_DEL_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR_PROC_CTX, \
+ struct ipa_ioc_del_hdr_proc_ctx *)
+
+#define IPA_IOC_GET_HW_VERSION _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_HW_VERSION, \
+ enum ipa_hw_type *)
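+
+/*
+ * Example (illustrative): querying the HW version. enum ipa_hw_type is
+ * defined earlier in this header; fd is an open /dev/ipa descriptor, same
+ * includes as the examples above.
+ *
+ *   enum ipa_hw_type ver;
+ *
+ *   if (ioctl(fd, IPA_IOC_GET_HW_VERSION, &ver) == 0)
+ *       printf("IPA HW version %d\n", ver);
+ */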
+
+/*
+ * unique magic number of the Tethering bridge ioctls
+ */
+#define TETH_BRIDGE_IOC_MAGIC 0xCE
+
+/*
+ * Ioctls supported by Tethering bridge driver
+ */
+#define TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE 0
+#define TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS 1
+#define TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS 2
+#define TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES 3
+#define TETH_BRIDGE_IOCTL_MAX 4
+
+
+/**
+ * enum teth_link_protocol_type - link protocol (IP / Ethernet)
+ */
+enum teth_link_protocol_type {
+ TETH_LINK_PROTOCOL_IP,
+ TETH_LINK_PROTOCOL_ETHERNET,
+ TETH_LINK_PROTOCOL_MAX,
+};
+
+/**
+ * enum teth_aggr_protocol_type - Aggregation protocol (MBIM / TLP)
+ */
+enum teth_aggr_protocol_type {
+ TETH_AGGR_PROTOCOL_NONE,
+ TETH_AGGR_PROTOCOL_MBIM,
+ TETH_AGGR_PROTOCOL_TLP,
+ TETH_AGGR_PROTOCOL_MAX,
+};
+
+/**
+ * struct teth_aggr_params_link - Aggregation parameters for uplink/downlink
+ * @aggr_prot: Aggregation protocol (MBIM / TLP)
+ * @max_transfer_size_byte: Maximal size of aggregated packet in bytes.
+ * Default value is 16*1024.
+ * @max_datagrams: Maximal number of IP packets in an aggregated
+ * packet. Default value is 16
+ */
+struct teth_aggr_params_link {
+ enum teth_aggr_protocol_type aggr_prot;
+ uint32_t max_transfer_size_byte;
+ uint32_t max_datagrams;
+};
+
+
+/**
+ * struct teth_aggr_params - Aggregation parameters
+ * @ul: Uplink parameters
+ * @dl: Downlink parameters
+ */
+struct teth_aggr_params {
+ struct teth_aggr_params_link ul;
+ struct teth_aggr_params_link dl;
+};
+
+/**
+ * struct teth_aggr_capabilities - Aggregation capabilities
+ * @num_protocols: Number of protocols described in the array
+ * @prot_caps: Array of aggregation capabilities per protocol
+ */
+struct teth_aggr_capabilities {
+ uint16_t num_protocols;
+ struct teth_aggr_params_link prot_caps[0];
+};
+
+/**
+ * struct teth_ioc_set_bridge_mode - parameters for setting the bridge mode
+ * @link_protocol: link protocol (IP / Ethernet)
+ * @lcid: logical channel number
+ */
+struct teth_ioc_set_bridge_mode {
+ enum teth_link_protocol_type link_protocol;
+ uint16_t lcid;
+};
+
+/**
+ * struct teth_ioc_aggr_params - aggregation parameters and logical channel
+ * @aggr_params: Aggregation parameters
+ * @lcid: logical channel number
+ */
+struct teth_ioc_aggr_params {
+ struct teth_aggr_params aggr_params;
+ uint16_t lcid;
+};
+
+
+#define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
+ TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
+ struct teth_ioc_set_bridge_mode *)
+#define TETH_BRIDGE_IOC_SET_AGGR_PARAMS _IOW(TETH_BRIDGE_IOC_MAGIC, \
+ TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS, \
+ struct teth_ioc_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_PARAMS _IOR(TETH_BRIDGE_IOC_MAGIC, \
+ TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS, \
+ struct teth_ioc_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_CAPABILITIES _IOWR(TETH_BRIDGE_IOC_MAGIC, \
+ TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES, \
+ struct teth_aggr_capabilities *)
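+
+/*
+ * Example (illustrative): programming symmetric MBIM aggregation limits via
+ * TETH_BRIDGE_IOC_SET_AGGR_PARAMS, using the default values documented
+ * above. teth_fd stands for an open descriptor on the tethering bridge
+ * device node, whose name is defined by the driver rather than this header.
+ *
+ *   struct teth_ioc_aggr_params params;
+ *
+ *   memset(&params, 0, sizeof(params));
+ *   params.lcid = 0;    // logical channel, placeholder value
+ *   params.aggr_params.dl.aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
+ *   params.aggr_params.dl.max_transfer_size_byte = 16 * 1024;
+ *   params.aggr_params.dl.max_datagrams = 16;
+ *   params.aggr_params.ul = params.aggr_params.dl;
+ *   ioctl(teth_fd, TETH_BRIDGE_IOC_SET_AGGR_PARAMS, &params);
+ */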
+
+/*
+ * unique magic number of the ODU bridge ioctls
+ */
+#define ODU_BRIDGE_IOC_MAGIC 0xCD
+
+/*
+ * Ioctls supported by ODU bridge driver
+ */
+#define ODU_BRIDGE_IOCTL_SET_MODE 0
+#define ODU_BRIDGE_IOCTL_SET_LLV6_ADDR 1
+#define ODU_BRIDGE_IOCTL_MAX 2
+
+/**
+ * enum odu_bridge_mode - bridge mode
+ * (ROUTER MODE / BRIDGE MODE)
+ */
+enum odu_bridge_mode {
+ ODU_BRIDGE_MODE_ROUTER,
+ ODU_BRIDGE_MODE_BRIDGE,
+ ODU_BRIDGE_MODE_MAX,
+};
+
+#define ODU_BRIDGE_IOC_SET_MODE _IOW(ODU_BRIDGE_IOC_MAGIC, \
+ ODU_BRIDGE_IOCTL_SET_MODE, \
+ enum odu_bridge_mode)
+
+#define ODU_BRIDGE_IOC_SET_LLV6_ADDR _IOW(ODU_BRIDGE_IOC_MAGIC, \
+ ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \
+ struct in6_addr *)
+
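+/*
+ * Example (illustrative): setting the bridge's link-local IPv6 address.
+ * odu_fd stands for an open descriptor on the ODU bridge device node,
+ * whose name is defined by the driver rather than this header; the
+ * address is a placeholder.
+ *
+ *   #include <arpa/inet.h>
+ *   #include <netinet/in.h>
+ *
+ *   struct in6_addr llv6;
+ *
+ *   if (inet_pton(AF_INET6, "fe80::1", &llv6) == 1)
+ *       ioctl(odu_fd, ODU_BRIDGE_IOC_SET_LLV6_ADDR, &llv6);
+ */
+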
+#endif /* _UAPI_MSM_IPA_H_ */